| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
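# A minimal usage sketch for the helpers restored above (added here; not part
# of the original file). Function names follow the restored definitions.
assert to_pascal_case("one two three") == "OneTwoThree"
assert to_camel_case("one two three") == "oneTwoThree"
assert to_snake_case("one two three", upper=False) == "one_two_three"
assert to_kebab_case("one two three", upper=True) == "ONE-TWO-THREE"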
| 672 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
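# Hedged usage sketch for the two helpers above (not part of the original file).
# If `value` is already prime, next_prime recurses and returns the next prime
# strictly after it.
assert is_prime(2) and is_prime(3) and not is_prime(9)
assert next_prime(14) == 17           # 14 -> 15 -> 16 -> 17
assert next_prime(5, factor=2) == 11  # starts the search at 10
assert next_prime(7) == 11            # 7 is prime, so the search restarts at 8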
| 672 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
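# Hedged sketch: composing a Blip2Config from sub-configs via the classmethod
# restored above (defaults shown; this block is not part of the original file).
if __name__ == "__main__":
    vision_config = Blip2VisionConfig()
    qformer_config = Blip2QFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    print(config.qformer_config.encoder_hidden_size)  # 1408, tied to the vision hidden size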
| 672 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Remove all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
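# Quick sanity checks for the three variants above (added; not in the original file).
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram_fastest("hello world")  # only 7 distinct letters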
| 672 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ], )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)

        return {"mse": mse}
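# Hedged usage sketch mirroring the docstring examples above; note that
# datasets.load_metric is deprecated in recent `datasets` releases.
if __name__ == "__main__":
    mse_metric = datasets.load_metric("mse")
    results = mse_metric.compute(predictions=[2.5, 0.0, 2, 8], references=[3, -0.5, 2, 7])
    assert results == {"mse": 0.375}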
| 672 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
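# Hedged sketch: a concrete subclass of the abstract base above. The command
# name and argument are illustrative, not from the original file.
class EchoCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", type=str, default="hello")

    def __init__(self, text: str):
        self._text = text

    def run(self):
        print(self._text)


if __name__ == "__main__":
    parser = ArgumentParser()
    EchoCommand.register_subcommand(parser)
    args = parser.parse_args()
    EchoCommand(args.text).run()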
| 672 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 672 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 672 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
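# Another worked input for the evaluator above (added; not in the original file):
# the expression must be fully parenthesized and operands must be single digits,
# since int(i) reads one character at a time.
assert dijkstras_two_stack_algorithm("((9 - 2) * 3)") == 21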
| 672 | 1 |
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
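# Quick check (added; not in the original file) of the identity
# (2 / (1 + e^(-2x))) - 1 == tanh(x) against numpy's builtin.
x = np.array([-1.0, 0.0, 1.0])
assert np.allclose(tangent_hyperbolic(x), np.tanh(x))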
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')
        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
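# Hedged sketch (added; not in the original file) of the derived properties
# above for the default 24 kHz config; chunking is disabled by default, so
# chunk_length and chunk_stride are None.
if __name__ == "__main__":
    config = EncodecConfig()
    assert config.frame_rate == 75       # ceil(24000 / (8 * 5 * 4 * 2))
    assert config.num_quantizers == 32   # int(1000 * 24.0 // (75 * 10))
    assert config.chunk_length is None and config.chunk_stride is None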
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 1 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    n = len(g)

    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)

    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 672 | 1 |
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
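# A hybrid integer is p**q * q**p for distinct primes p < q; taking log2 of
# p**q * q**p <= base**degree gives the two-pointer bound check used above.
# Small sanity check (added): below 100 the only hybrid integer is 2**3 * 3**2 = 72.
assert solution(100, 1) == 1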
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
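# Worked example (added; not in the original file) for the improved-Euler
# (Heun) stepper above: dy/dx = y with y(0) = 1, so y(1) should be close to
# e ≈ 2.71828; the method is second-order accurate in the step size.
ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(ys[-1] - 2.71828) < 1e-3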
| 672 | 1 |
'''simple docstring'''
class UpperCamelCase_ :
def __init__( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Dict = {}
def _lowercase( self , A ) -> Tuple:
if vertex not in self.adjacency:
UpperCAmelCase : Optional[Any] = {}
self.num_vertices += 1
def _lowercase( self , A , A , A ) -> Optional[int]:
self.add_vertex(A )
self.add_vertex(A )
if head == tail:
return
UpperCAmelCase : Union[str, Any] = weight
UpperCAmelCase : Tuple = weight
def _lowercase( self ) -> str:
UpperCAmelCase : str = self.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = edge
edges.remove((tail, head, weight) )
for i in range(len(A ) ):
UpperCAmelCase : int = list(edges[i] )
edges.sort(key=lambda A : e[2] )
for i in range(len(A ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
UpperCAmelCase : str = edges[i][2] + 1
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = edge
UpperCAmelCase : Dict = weight
UpperCAmelCase : int = weight
def __str__( self ) -> int:
UpperCAmelCase : Dict = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCAmelCase : Union[str, Any] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _lowercase( self ) -> str:
UpperCAmelCase : Optional[int] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _lowercase( self ) -> Tuple:
return self.adjacency.keys()
@staticmethod
def _lowercase( A=None , A=None ) -> str:
UpperCAmelCase : Tuple = Graph()
if vertices is None:
UpperCAmelCase : Any = []
if edges is None:
UpperCAmelCase : str = []
for vertex in vertices:
g.add_vertex(A )
for edge in edges:
g.add_edge(*A )
return g
class UpperCamelCase_ :
def __init__( self ) -> Dict:
UpperCAmelCase : Any = {}
UpperCAmelCase : List[Any] = {}
def __len__( self ) -> Optional[Any]:
return len(self.parent )
def _lowercase( self , A ) -> Union[str, Any]:
if item in self.parent:
return self.find(A )
UpperCAmelCase : Union[str, Any] = item
UpperCAmelCase : Any = 0
return item
def _lowercase( self , A ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(A )
if item != self.parent[item]:
UpperCAmelCase : List[str] = self.find(self.parent[item] )
return self.parent[item]
def _lowercase( self , A , A ) -> Optional[Any]:
UpperCAmelCase : List[str] = self.find(A )
UpperCAmelCase : List[str] = self.find(A )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCAmelCase : Optional[Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCAmelCase : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCAmelCase : int = roota
return roota
return None
@staticmethod
def _lowercase( A ) -> Tuple:
UpperCAmelCase : List[Any] = graph.num_vertices
UpperCAmelCase : Optional[int] = Graph.UnionFind()
UpperCAmelCase : Optional[Any] = []
while num_components > 1:
UpperCAmelCase : Optional[Any] = {}
for vertex in graph.get_vertices():
UpperCAmelCase : Optional[Any] = -1
UpperCAmelCase : str = graph.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = edge
UpperCAmelCase : Tuple = union_find.find(A )
UpperCAmelCase : Union[str, Any] = union_find.find(A )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase : Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase : Optional[Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = cheap_edge[vertex]
if union_find.find(A ) != union_find.find(A ):
union_find.union(A , A )
mst_edges.append(cheap_edge[vertex] )
UpperCAmelCase : List[Any] = num_components - 1
UpperCAmelCase : Any = Graph.build(edges=A )
return mst
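# Minimal usage sketch for the Borůvka implementation above (added; not part
# of the original file). The edge weights here are already distinct.
if __name__ == "__main__":
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
    mst = Graph.boruvka_mst(g)
    print(mst)  # MST keeps the three cheapest edges: 0-1, 0-2, 2-3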
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
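# Hedged usage sketch (added; not in the original file) through the high-level
# pipeline API; the checkpoint name is illustrative, any NLI model with an
# "entailment" label works.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
    print(result["labels"][0], result["scores"][0])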
| 672 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
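# Small demo (added; not in the original file) of shift_tokens_right above:
# targets are shifted one position to the right and the decoder start token is
# prepended; any -100 that survives the shift becomes the pad id.
if __name__ == "__main__":
    example = jnp.array([[5, 6, 7, -100]])
    print(shift_tokens_right(example, pad_token_id=0, decoder_start_token_id=0))  # [[0 5 6 7]]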
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
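# Sketch of the id arithmetic the assertions above rely on (assumption:
# Pegasus reserves the low `offset` ids for special tokens and shifts every
# raw SentencePiece id up by that constant; 103 for google/pegasus-large).
PEGASUS_OFFSET = 103
def spm_to_pegasus_id(spm_id: int) -> int:
    return spm_id + PEGASUS_OFFSET
assert spm_to_pegasus_id(2) == 105  # mirrors the unk_token_id assertion above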
| 672 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=A , )
assert hasattr(self , """env""" )
def _lowercase( self , A=1 ) -> str:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowercase( self , A ) -> Optional[int]:
TrainingJobAnalytics(A ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
def _lowercase( self ) -> List[str]:
# create estimator
UpperCAmelCase : int = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCAmelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A )
| 672 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
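# Back-of-the-envelope sketch of the spatial shapes the tests above assert
# (assumption: ConvNeXt-style backbones start at stride 4 and halve the
# resolution at each subsequent stage; the helper name is illustrative).
def feature_map_hw(image_size: int, stage_index: int) -> int:
    # stage 1 -> /4, stage 2 -> /8, stage 3 -> /16, stage 4 -> /32
    return image_size // (4 * 2 ** (stage_index - 1))
assert feature_map_hw(32, 1) == 8  # matches the image_size // 4 check
assert feature_map_hw(32, 4) == 1  # matches the image_size // 32 check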
| 672 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self , A = 1 , A = 24000 , A = 0.0 , A = None , A = None , **A , ) -> Optional[int]:
super().__init__(feature_size=A , sampling_rate=A , padding_value=A , **A )
UpperCAmelCase : Optional[int] = chunk_length_s
UpperCAmelCase : int = overlap
@property
def _lowercase( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowercase( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , A , A = None , A = False , A = None , A = None , A = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
UpperCAmelCase : str = True
UpperCAmelCase : int = bool(
isinstance(A , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCAmelCase : Optional[int] = [np.asarray(A , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(A , np.ndarray ):
UpperCAmelCase : str = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCAmelCase : List[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase : Dict = [np.asarray(A ).T]
# verify inputs are valid
for idx, example in enumerate(A ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Dict = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCAmelCase : Any = min(array.shape[0] for array in raw_audio )
UpperCAmelCase : Optional[int] = int(np.floor(max_length / self.chunk_stride ) )
UpperCAmelCase : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCAmelCase : str = max(array.shape[0] for array in raw_audio )
UpperCAmelCase : Optional[Any] = int(np.ceil(max_length / self.chunk_stride ) )
UpperCAmelCase : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCAmelCase : Optional[Any] = """max_length"""
else:
UpperCAmelCase : Union[str, Any] = input_values
# normal padding on batch
if padded_inputs is None:
UpperCAmelCase : Dict = self.pad(
A , max_length=A , truncation=A , padding=A , return_attention_mask=A , )
if padding:
UpperCAmelCase : int = padded_inputs.pop("""attention_mask""" )
UpperCAmelCase : List[str] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
UpperCAmelCase : List[str] = example[..., None]
input_values.append(example.T )
UpperCAmelCase : List[str] = input_values
if return_tensors is not None:
UpperCAmelCase : List[str] = padded_inputs.convert_to_tensors(A )
return padded_inputs
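# Self-contained sketch of the chunking arithmetic implemented above
# (function names are illustrative; the formulas mirror chunk_length,
# chunk_stride and the "padding" branch of __call__).
import math
def _chunk_params(chunk_length_s: float, overlap: float, sampling_rate: int = 24000):
    chunk_length = int(chunk_length_s * sampling_rate)
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
    return chunk_length, chunk_stride
def _padded_length(raw_length: int, chunk_length: int, chunk_stride: int) -> int:
    nb_step = int(math.ceil(raw_length / chunk_stride))  # round steps up
    return (nb_step - 1) * chunk_stride + chunk_length
# with 1s chunks and 25% overlap, a 30000-sample clip pads up to 42000 samples
_cl, _cs = _chunk_params(1.0, 0.25)
assert _padded_length(30000, _cl, _cs) == 42000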
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
    Example 1 - A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2 - The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = 1_0_0_0 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , _lowercase + 1 ) )
if __name__ == "__main__":
print(solution())
| 672 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase : List[Any] = [
        int(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
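# Worked sketch, independent of the helpers above: for 3 variables and
# minterms {0, 1, 2, 5} (000, 001, 010, 101), merging pairs that differ in
# exactly one bit gives the prime implicants 00_, 0_0 and _01; the essential
# ones (0_0 and _01) already cover every minterm.
def _differ_in_one_bit(a: str, b: str) -> bool:
    return sum(x != y for x, y in zip(a, b)) == 1
assert _differ_in_one_bit("000", "001") and not _differ_in_one_bit("000", "011")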
| 672 | 1 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCamelCase_ :
def __init__( self , A ) -> Optional[Any]:
UpperCAmelCase : Any = data
UpperCAmelCase : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def _lowercase( A , A ) -> Dict:
return ((n << b) | (n >> (32 - b))) & 0Xffffffff
def _lowercase( self ) -> str:
UpperCAmelCase : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def _lowercase( self ) -> int:
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowercase( self , A ) -> Tuple:
UpperCAmelCase : Union[str, Any] = list(struct.unpack(""">16L""" , A ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase : List[str] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = self.padding()
UpperCAmelCase : Optional[int] = self.split_blocks()
for block in self.blocks:
UpperCAmelCase : List[Any] = self.expand_block(A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase : List[Any] = (b & c) | ((~b) & d)
UpperCAmelCase : Dict = 0X5a827999
elif 20 <= i < 40:
UpperCAmelCase : str = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = 0X6ed9eba1
elif 40 <= i < 60:
UpperCAmelCase : List[str] = (b & c) | (b & d) | (c & d)
UpperCAmelCase : Union[str, Any] = 0X8f1bbcdc
elif 60 <= i < 80:
UpperCAmelCase : int = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = 0Xca62c1d6
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = (
self.rotate(A , 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(A , 30 ),
c,
d,
)
UpperCAmelCase : Optional[Any] = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> List[str]:
UpperCAmelCase : List[Any] = B"""Test String"""
assert SHAaHash(_lowercase ).final_hash() == hashlib.shaa(_lowercase ).hexdigest() # noqa: S324
def __lowerCamelCase ( ) -> Tuple:
UpperCAmelCase : Tuple = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
UpperCAmelCase : List[str] = parser.parse_args()
UpperCAmelCase : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
UpperCAmelCase : str = f.read()
else:
UpperCAmelCase : Tuple = bytes(_lowercase , """utf-8""" )
print(SHAaHash(_lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
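# Standalone sketch of the 32-bit left-rotate primitive used in every SHA-1
# round above (the helper name is illustrative).
def _rotl32(n: int, b: int) -> int:
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
assert _rotl32(0x80000000, 1) == 1  # the high bit wraps around to bit 0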
| 672 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
        # Speed is increased slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# Two chains are possible:
# one ends with 89; declaring the chain member 58 first gives the least
# number of iterations needed for all the members to be checked.
# The other ends with 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# A dictionary was changed to an array to speed up the solution.
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
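# Worked sketch of one square-digit chain, independent of the memoised
# helpers above: 44 -> 32 -> 13 -> 10 -> 1, so 44 belongs to the chain
# that terminates at 1.
def _digit_square_sum(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))
_seq = [44]
while _seq[-1] not in (1, 89):
    _seq.append(_digit_square_sum(_seq[-1]))
assert _seq == [44, 32, 13, 10, 1]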
| 672 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( __magic_name__ ):
lowercase = ['image_processor']
lowercase = 'SamImageProcessor'
def __init__( self , A ) -> Dict:
super().__init__(A )
UpperCAmelCase : List[Any] = self.image_processor
UpperCAmelCase : Any = -10
UpperCAmelCase : Optional[Any] = self.image_processor.size["""longest_edge"""]
def __call__( self , A=None , A=None , A=None , A=None , A = None , **A , ) -> BatchEncoding:
UpperCAmelCase : Optional[Any] = self.image_processor(
A , return_tensors=A , **A , )
        # pop arguments that are not used in the forward pass but used nevertheless
UpperCAmelCase : List[str] = encoding_image_processor["""original_sizes"""]
if hasattr(A , """numpy""" ): # Checks if Torch or TF tensor
UpperCAmelCase : Any = original_sizes.numpy()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._check_and_preprocess_points(
input_points=A , input_labels=A , input_boxes=A , )
UpperCAmelCase : Dict = self._normalize_and_convert(
A , A , input_points=A , input_labels=A , input_boxes=A , return_tensors=A , )
return encoding_image_processor
def _lowercase( self , A , A , A=None , A=None , A=None , A="pt" , ) -> Optional[int]:
if input_points is not None:
if len(A ) != len(A ):
UpperCAmelCase : Optional[int] = [
self._normalize_coordinates(self.target_size , A , original_sizes[0] ) for point in input_points
]
else:
UpperCAmelCase : str = [
self._normalize_coordinates(self.target_size , A , A )
for point, original_size in zip(A , A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCAmelCase , UpperCAmelCase : Tuple = self._pad_points_and_labels(A , A )
UpperCAmelCase : Any = np.array(A )
if input_labels is not None:
UpperCAmelCase : Dict = np.array(A )
if input_boxes is not None:
if len(A ) != len(A ):
UpperCAmelCase : Optional[Any] = [
self._normalize_coordinates(self.target_size , A , original_sizes[0] , is_bounding_box=A )
for box in input_boxes
]
else:
UpperCAmelCase : str = [
self._normalize_coordinates(self.target_size , A , A , is_bounding_box=A )
for box, original_size in zip(A , A )
]
UpperCAmelCase : List[Any] = np.array(A )
if input_boxes is not None:
if return_tensors == "pt":
UpperCAmelCase : List[Any] = torch.from_numpy(A )
# boxes batch size of 1 by default
UpperCAmelCase : Tuple = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCAmelCase : int = tf.convert_to_tensor(A )
# boxes batch size of 1 by default
UpperCAmelCase : Any = tf.expand_dims(A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCAmelCase : List[str] = torch.from_numpy(A )
# point batch size of 1 by default
UpperCAmelCase : str = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCAmelCase : int = tf.convert_to_tensor(A )
# point batch size of 1 by default
UpperCAmelCase : Optional[int] = tf.expand_dims(A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCAmelCase : Union[str, Any] = torch.from_numpy(A )
# point batch size of 1 by default
UpperCAmelCase : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCAmelCase : str = tf.convert_to_tensor(A )
# point batch size of 1 by default
UpperCAmelCase : Dict = tf.expand_dims(A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def _lowercase( self , A , A ) -> str:
UpperCAmelCase : List[str] = max([point.shape[0] for point in input_points] )
UpperCAmelCase : List[str] = []
for i, point in enumerate(A ):
if point.shape[0] != expected_nb_points:
UpperCAmelCase : List[str] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCAmelCase : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(A )
UpperCAmelCase : str = processed_input_points
return input_points, input_labels
def _lowercase( self , A , A , A , A=False ) -> np.ndarray:
UpperCAmelCase , UpperCAmelCase : Tuple = original_size
UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processor._get_preprocess_shape(A , longest_edge=A )
UpperCAmelCase : Any = deepcopy(A ).astype(A )
if is_bounding_box:
UpperCAmelCase : Tuple = coords.reshape(-1 , 2 , 2 )
UpperCAmelCase : int = coords[..., 0] * (new_w / old_w)
UpperCAmelCase : int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCAmelCase : List[str] = coords.reshape(-1 , 4 )
return coords
def _lowercase( self , A=None , A=None , A=None , ) -> List[str]:
if input_points is not None:
if hasattr(A , """numpy""" ): # Checks for TF or Torch tensor
UpperCAmelCase : Dict = input_points.numpy().tolist()
if not isinstance(A , A ) or not isinstance(input_points[0] , A ):
raise ValueError("""Input points must be a list of list of floating points.""" )
UpperCAmelCase : Any = [np.array(A ) for input_point in input_points]
else:
UpperCAmelCase : List[str] = None
if input_labels is not None:
if hasattr(A , """numpy""" ):
UpperCAmelCase : Any = input_labels.numpy().tolist()
if not isinstance(A , A ) or not isinstance(input_labels[0] , A ):
raise ValueError("""Input labels must be a list of list integers.""" )
UpperCAmelCase : Tuple = [np.array(A ) for label in input_labels]
else:
UpperCAmelCase : Any = None
if input_boxes is not None:
if hasattr(A , """numpy""" ):
UpperCAmelCase : Tuple = input_boxes.numpy().tolist()
if (
not isinstance(A , A )
or not isinstance(input_boxes[0] , A )
or not isinstance(input_boxes[0][0] , A )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
UpperCAmelCase : Optional[int] = [np.array(A ).astype(np.floataa ) for box in input_boxes]
else:
UpperCAmelCase : Tuple = None
return input_points, input_labels, input_boxes
@property
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = self.image_processor.model_input_names
return list(dict.fromkeys(A ) )
def _lowercase( self , *A , **A ) -> Tuple:
return self.image_processor.post_process_masks(*A , **A )
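# Self-contained sketch of the coordinate rescaling performed by
# _normalize_coordinates above (assumption: points are (x, y) pixels in the
# original image and are mapped onto the longest-edge-resized grid; the
# helper names are illustrative).
def _preprocess_shape(old_h: int, old_w: int, longest_edge: int):
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)
def _rescale_point(x: float, y: float, old_hw, new_hw):
    (old_h, old_w), (new_h, new_w) = old_hw, new_hw
    return x * (new_w / old_w), y * (new_h / old_h)
# a 640x480 image resized to longest edge 1024 maps (320, 240) -> (512, 384)
_new_hw = _preprocess_shape(480, 640, 1024)  # -> (768, 1024)
_x, _y = _rescale_point(320, 240, (480, 640), _new_hw)
assert (round(_x), round(_y)) == (512, 384)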
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
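# Minimal toy sketch of the lazy-import pattern wired up above (assumption:
# _LazyModule resolves attributes to real imports on first access; this toy
# version only illustrates the idea and is not the actual implementation).
import importlib
import types
class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._toy_import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._toy_import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)
_lazy_math = _ToyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
assert _lazy_math.sqrt(9.0) == 3.0  # "math" is imported only on this access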
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=50 , A=0.0_2 , A=True , A=None , ) -> Optional[Any]:
UpperCAmelCase : Any = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Optional[int] = use_input_mask
UpperCAmelCase : Tuple = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Any = use_labels
UpperCAmelCase : Optional[Any] = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Any:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A , initializer_range=self.initializer_range , )
def _lowercase( self ) -> Optional[Any]:
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase : List[Any] = True
UpperCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowercase( self , A , A , A , A , **A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = BertGenerationEncoder(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , **A , ) -> Any:
UpperCAmelCase : Any = True
UpperCAmelCase : Optional[Any] = BertGenerationEncoder(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , **A , ) -> Dict:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[str] = BertGenerationDecoder(config=A ).to(A ).eval()
# first forward pass
UpperCAmelCase : int = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCAmelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Optional[int] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self , A , A , A , A , *A , ) -> Optional[int]:
UpperCAmelCase : int = BertGenerationDecoder(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = BertGenerationEncoderTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> Dict:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : str = """bert"""
self.model_tester.create_and_check_model(A , A , A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A )
def _lowercase( self ) -> List[str]:
# This regression test was failing with PyTorch < 1.3
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : str = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
A , A , A , A , A , A , )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*A )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(A )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCAmelCase : str = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
UpperCAmelCase : Tuple = model(A )[0]
UpperCAmelCase : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , A )
UpperCAmelCase : Optional[int] = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCAmelCase : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(A )[0]
UpperCAmelCase : str = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , A )
UpperCAmelCase : List[str] = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
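# What the past_key_values check above buys in user code (sketch only;
# standard transformers generation API, same checkpoint as the tests above):
#   decoder = BertGenerationDecoder.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   out = decoder(input_ids[:, :-1], use_cache=True)
#   step = decoder(input_ids[:, -1:], past_key_values=out.past_key_values)
#   # step reuses the cached keys/values, so only the newest token is
#   # recomputed, and its logits match a full forward pass up to float error.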
| 672 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor that we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
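# e.g. (illustrative numbers, not from the source): a downhill move with
# change = -2 is kept with probability e**(-2/100) ~ 0.98 while current_temp
# is 100, but only e**(-2/1) ~ 0.14 once it has cooled to 1, so bad moves
# become rare as the search ends.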
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 672 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase_ :
lowercase = None
lowercase = False
lowercase = False
lowercase = False
lowercase = None
lowercase = None
lowercase = False
lowercase = False
lowercase = False
lowercase = True
lowercase = None
lowercase = 1
lowercase = None
lowercase = False
lowercase = None
lowercase = None
def _lowercase( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(A ) for k, v in self.__dict__.items()} )
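# Usage sketch (assumption: this is datasets' DownloadConfig and the method
# above is its copy(); the field names above are obfuscated, force_download
# is the real attribute in datasets):
#   base = DownloadConfig()
#   per_file = base.copy()
#   per_file.force_download = True  # deep copy, so `base` is left untouched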
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
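# Effect (sketch): once the lazy module is installed, an import such as
# `from transformers.models.nezha import NezhaModel` resolves the symbol on
# first attribute access instead of importing the heavy torch code eagerly.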
| 672 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCamelCase_ :
def _lowercase( self , A ) -> Optional[int]:
raise NotImplementedError()
def _lowercase( self ) -> Any:
raise NotImplementedError()
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A = False , **A ) -> List[Any]:
UpperCAmelCase : Any = tokenizer
UpperCAmelCase : List[str] = skip_prompt
UpperCAmelCase : int = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : int = []
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = True
def _lowercase( self , A ) -> Optional[Any]:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
UpperCAmelCase : int = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
UpperCAmelCase : List[str] = text[self.print_len :]
UpperCAmelCase : Any = []
UpperCAmelCase : List[Any] = 0
# If the last token is a CJK character, we print the characters.
elif len(A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Tuple = text[self.print_len :]
self.print_len += len(A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Dict = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(A )
self.on_finalized_text(A )
def _lowercase( self ) -> int:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase : List[Any] = text[self.print_len :]
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : List[Any] = """"""
UpperCAmelCase : Dict = True
self.on_finalized_text(A , stream_end=A )
def _lowercase( self , A , A = False ) -> Dict:
print(A , flush=A , end="""""" if not stream_end else None )
def _lowercase( self , A ) -> Tuple:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A = False , A = None , **A ) -> Dict:
super().__init__(A , A , **A )
UpperCAmelCase : Optional[int] = Queue()
UpperCAmelCase : Dict = None
UpperCAmelCase : List[str] = timeout
def _lowercase( self , A , A = False ) -> Union[str, Any]:
self.text_queue.put(A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> List[str]:
return self
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
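if __name__ == "__main__":
    # Minimal usage sketch (assumption: the classes above correspond to
    # transformers' TextStreamer / TextIteratorStreamer, so we exercise the
    # public generate() API instead of the obfuscated definitions).
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
    # The streamer prints decoded text to stdout as soon as whole words are ready.
    model.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)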
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
image=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 1 |
'''simple docstring'''
import math
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = input("""Enter message: """ )
UpperCAmelCase : Union[str, Any] = int(input(F'''Enter key [2-{len(_lowercase ) - 1}]: ''' ) )
UpperCAmelCase : Dict = input("""Encryption/Decryption [e/d]: """ )
if mode.lower().startswith("""e""" ):
UpperCAmelCase : List[str] = encrypt_message(_lowercase , _lowercase )
elif mode.lower().startswith("""d""" ):
UpperCAmelCase : Optional[Any] = decrypt_message(_lowercase , _lowercase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'''Output:\n{text + '|'}''' )
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : Any = [""""""] * key
for col in range(_lowercase ):
UpperCAmelCase : List[str] = col
while pointer < len(_lowercase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : Tuple = math.ceil(len(_lowercase ) / key )
UpperCAmelCase : List[Any] = key
UpperCAmelCase : Union[str, Any] = (num_cols * num_rows) - len(_lowercase )
UpperCAmelCase : Optional[Any] = [""""""] * num_cols
UpperCAmelCase : Tuple = 0
UpperCAmelCase : List[str] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase : Tuple = 0
row += 1
return "".join(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 672 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : Any = get_logger()
a : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , A=None , A=None , **A ) -> str:
super().__init__(features=A )
import jax
from jaxlib.xla_client import Device
if isinstance(A , A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
UpperCAmelCase : Optional[int] = device if isinstance(A , A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase : List[Any] = str(jax.devices()[0] )
UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(A )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase : List[str] = {}
if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
UpperCAmelCase : str = {"""dtype""": jnp.int64}
else:
UpperCAmelCase : int = {"""dtype""": jnp.int32}
elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase : Any = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
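# Usage sketch (assumption: this formatter is what backs datasets' "jax"
# format string):
#   ds = load_dataset("rotten_tomatoes", split="train").with_format("jax")
#   ds[0]["label"]  # comes back as a jax.Array instead of a Python int
#   ds.with_format("jax", device=str(jax.devices()[0]))  # pin output device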
| 672 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Any = {"""vocab_file""": """vocab.txt"""}
a : List[Any] = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
a : List[str] = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
with open(_lowercase , """r""" ) as f:
UpperCAmelCase : Optional[Any] = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self , A , A="<unk>" , A="<cls>" , A="<pad>" , A="<mask>" , A="<eos>" , **A , ) -> Any:
super().__init__(**A )
UpperCAmelCase : int = load_vocab_file(A )
UpperCAmelCase : Tuple = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : List[str] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : List[str] = unk_token
UpperCAmelCase : List[Any] = cls_token
UpperCAmelCase : str = pad_token
UpperCAmelCase : int = mask_token
UpperCAmelCase : Tuple = eos_token
UpperCAmelCase : List[str] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _lowercase( self , A ) -> str:
return self._id_to_token.get(A , self.unk_token )
def _lowercase( self , A ) -> int:
return self._token_to_id.get(A , self._token_to_id.get(self.unk_token ) )
def _lowercase( self , A , **A ) -> Any:
return text.split()
def _lowercase( self , A=False ) -> Optional[int]:
return len(self._id_to_token )
def _lowercase( self ) -> Any:
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowercase( self , A ) -> int:
return self._token_to_id.get(A , self._token_to_id.get(self.unk_token ) )
def _lowercase( self , A ) -> str:
return self._id_to_token.get(A , self.unk_token )
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[Any] = [self.cls_token_id]
UpperCAmelCase : str = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Optional[int] = [1] + ([0] * len(A )) + [1]
if token_ids_a is not None:
mask += [0] * len(A ) + [1]
return mask
def _lowercase( self , A , A ) -> Optional[int]:
UpperCAmelCase : Dict = os.path.join(A , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
with open(A , """w""" ) as f:
f.write("""\n""".join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowercase( self ) -> int:
return self.get_vocab_size(with_added_tokens=A )
def _lowercase( self , A , A = False ) -> int:
return super()._add_tokens(A , special_tokens=A )
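# Usage sketch (assumption: this is transformers' EsmTokenizer; the trie of
# no-split tokens built above is what splits a protein string per residue):
#   tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   tok("MKTAYIAK")["input_ids"]  # one id per residue, wrapped as <cls> ... <eos>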
| 672 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : str = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase : Dict = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase : int = repo.get_issues(state="""open""" )
for issue in open_issues:
UpperCAmelCase : Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda _lowercase : i.created_at , reverse=_lowercase )
UpperCAmelCase : Any = comments[0] if len(_lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
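# Equivalent top-level usage (sketch, same checkpoint as the test above):
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="facebook/convnextv2-tiny-1k-224")
#   classifier(prepare_img())  # -> list of {"label": ..., "score": ...} dicts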
| 672 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
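# Config sketch (assumption: OpenLlama mirrors Llama's rope_scaling schema,
# which the test above exercises):
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
#   # linear scaling divides positions by 10, trading per-token resolution
#   # for a roughly 10x longer usable context window.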
| 672 | 1 |
'''simple docstring'''
from collections import deque
class UpperCamelCase_ :
def __init__( self , A , A , A ) -> None:
UpperCAmelCase : str = process_name # process name
UpperCAmelCase : List[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase : Optional[int] = arrival_time
UpperCAmelCase : Optional[int] = burst_time # remaining burst time
UpperCAmelCase : List[Any] = 0 # total time of the process wait in ready queue
UpperCAmelCase : List[str] = 0 # time from arrival time to completion time
class UpperCamelCase_ :
def __init__( self , A , A , A , A , ) -> None:
# total number of mlfq's queues
UpperCAmelCase : Optional[Any] = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase : Tuple = time_slices
# unfinished processes are in this ready_queue
UpperCAmelCase : Optional[Any] = queue
# current time
UpperCAmelCase : Optional[Any] = current_time
# finished processes are in this sequence queue
UpperCAmelCase : deque[Process] = deque()
def _lowercase( self ) -> list[str]:
UpperCAmelCase : str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _lowercase( self , A ) -> list[int]:
UpperCAmelCase : Any = []
for i in range(len(A ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _lowercase( self , A ) -> list[int]:
UpperCAmelCase : str = []
for i in range(len(A ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _lowercase( self , A ) -> list[int]:
UpperCAmelCase : Optional[Any] = []
for i in range(len(A ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _lowercase( self , A ) -> list[int]:
return [q.burst_time for q in queue]
def _lowercase( self , A ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _lowercase( self , A ) -> deque[Process]:
UpperCAmelCase : deque[Process] = deque() # sequence deque of finished processes
while len(A ) != 0:
UpperCAmelCase : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(A )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase : Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCAmelCase : Dict = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase : int = self.current_time
# add the process to queue that has finished queue
finished.append(A )
self.finish_queue.extend(A ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _lowercase( self , A , A ) -> tuple[deque[Process], deque[Process]]:
UpperCAmelCase : deque[Process] = deque() # sequence deque of terminated processes
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(A ) ):
UpperCAmelCase : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(A )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase : List[str] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(A )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase : Any = 0
# set the finish time
UpperCAmelCase : Dict = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase : Optional[int] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(A )
self.finish_queue.extend(A ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _lowercase( self ) -> deque[Process]:
# all queues except the last one use the round robin algorithm
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase , UpperCAmelCase : Any = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue uses the first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
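# Worked trace (illustrative) for the demo below: with burst times
# 53, 17, 68, 24 and time slices [17, 25], P2 finishes during the first
# round-robin pass, P4 during the second, and the final FCFS stage drains
# P1 and then P3, giving the finish sequence P2, P4, P1, P3.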
if __name__ == "__main__":
import doctest
a : Optional[int] = Process("""P1""", 0, 5_3)
a : Optional[Any] = Process("""P2""", 0, 1_7)
a : Union[str, Any] = Process("""P3""", 0, 6_8)
a : Optional[int] = Process("""P4""", 0, 2_4)
a : List[str] = 3
a : Union[str, Any] = [1_7, 2_5]
a : Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
a : int = Process("""P1""", 0, 5_3)
a : str = Process("""P2""", 0, 1_7)
a : Optional[int] = Process("""P3""", 0, 6_8)
a : Union[str, Any] = Process("""P4""", 0, 2_4)
a : Union[str, Any] = 3
a : Dict = [1_7, 2_5]
a : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
a : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0)
a : List[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 672 |
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowercase )
return value
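# Worked example (using the original names is_prime / next_prime, since the
# obfuscated definitions above collide): next_prime(14) steps 14 -> 15 -> 16
# -> 17 and returns 17, while next_prime(14, desc=True) steps down to 13.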
| 672 | 1 |
'''simple docstring'''
a : List[str] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a : int = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
assert len(str(_lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCAmelCase : str = year // 1_0_0
UpperCAmelCase : List[str] = (5 * (century % 4) + 2) % 7
UpperCAmelCase : str = year % 1_0_0
UpperCAmelCase : int = centurian % 1_2
UpperCAmelCase : int = (
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCAmelCase : Dict = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCAmelCase : List[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
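# Worked example (illustrative): for 24 October 2020, century_anchor = 2,
# dooms_day = 6 and day_anchor = 3, so (6 + 24 - 3) % 7 == 6 -> 'Saturday'.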
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
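# e.g. as originally written, all three checks return True for the default
# sentence, while "The brown fox jumps over the lazy dog" fails them: with
# "quick" dropped, 'q', 'i', 'c' and 'k' never appear.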
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
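# One more illustrative variant using the standard library's alphabet constant
# (equivalent in behavior to the fastest version above; shown for readability):
import string


def _is_pangram_reference(sentence: str) -> bool:
    return set(string.ascii_lowercase) <= set(sentence.lower())

# _is_pangram_reference("The quick brown fox jumps over the lazy dog") -> True
# _is_pangram_reference("Hello world") -> False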
| 672 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionSAGPipeline
lowercase = TEXT_TO_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase = False
def _lowercase( self ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
UpperCAmelCase : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase : Optional[int] = CLIPTextModel(A )
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase( self , A , A=0 ) -> Tuple:
if str(A ).startswith("""mps""" ):
UpperCAmelCase : List[str] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Tuple = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
UpperCAmelCase : Tuple = sag_pipe.to(A )
sag_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """."""
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Dict = sag_pipe(
[prompt] , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
UpperCAmelCase : int = output.images
UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : int = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
UpperCAmelCase : Tuple = sag_pipe.to(A )
sag_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """."""
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : str = sag_pipe(
[prompt] , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
UpperCAmelCase : Optional[int] = output.images
UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Dict = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
UpperCAmelCase : List[str] = sag_pipe.to(A )
sag_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[Any] = """."""
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : Any = sag_pipe(
[prompt] , width=768 , height=512 , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
UpperCAmelCase : Optional[Any] = output.images
assert image.shape == (1, 512, 768, 3)
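# Hedged usage sketch of the pipeline these tests exercise; the checkpoint id
# is taken from the first slow test above, while the prompt and sag_scale=0.75
# are illustrative assumptions.
if __name__ == "__main__":
    demo_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    demo_pipe = demo_pipe.to(torch_device)
    demo_out = demo_pipe(
        "a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5, num_inference_steps=20, output_type="np"
    )
    print(demo_out.images[0].shape)  # (512, 512, 3)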
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared differences between the
predicted and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a : int = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
a : List[str] = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
        # emb -> embeddings
if name.startswith("""emb.""" ):
UpperCAmelCase : Any = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
UpperCAmelCase : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
UpperCAmelCase : Optional[Any] = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , _lowercase )
# ffn -> feed_forward
UpperCAmelCase : List[Any] = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , _lowercase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
UpperCAmelCase : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
UpperCAmelCase : Any = name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(""".time_mix_r""" ):
UpperCAmelCase : Optional[Any] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
UpperCAmelCase : Tuple = """rwkv.""" + name
UpperCAmelCase : Union[str, Any] = weight
return state_dict
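# Illustrative dry run of the rename rules above on toy checkpoint keys; this
# helper re-applies the same substitutions to a single key (``re`` is imported
# at the top of this file):
def _demo_rename(key: str) -> str:
    if key.startswith("emb."):
        key = key.replace("emb.", "embeddings.")
    if key.startswith("blocks.0.ln0"):
        key = key.replace("blocks.0.ln0", "blocks.0.pre_ln")
    key = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", key)
    key = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", key)
    for old, new in ((".time_mix_k", ".time_mix_key"), (".time_mix_v", ".time_mix_value"), (".time_mix_r", ".time_mix_receptance")):
        if key.endswith(old):
            key = key.replace(old, new)
    return key if key == "head.weight" else "rwkv." + key

# _demo_rename("blocks.3.att.time_mix_r") -> "rwkv.blocks.3.attention.time_mix_receptance"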
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=None ) -> Dict:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
UpperCAmelCase : Dict = 5_0_2_7_7
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
UpperCAmelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=_lowercase )
UpperCAmelCase : int = len(_lowercase )
tokenizer.save_pretrained(_lowercase )
# 2. Build the config
UpperCAmelCase : Dict = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
UpperCAmelCase : Any = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
UpperCAmelCase : Any = RwkvConfig(
vocab_size=_lowercase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_lowercase )
# 3. Download model file then convert state_dict
UpperCAmelCase : Optional[int] = hf_hub_download(_lowercase , _lowercase )
UpperCAmelCase : int = torch.load(_lowercase , map_location="""cpu""" )
UpperCAmelCase : List[str] = convert_state_dict(_lowercase )
# 4. Split in shards and save
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = shard_checkpoint(_lowercase )
for shard_file, shard in shards.items():
torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) )
if index is not None:
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , _lowercase )
# Save the index as well
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : Any = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
f.write(_lowercase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        """Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model.""" )
UpperCAmelCase : Optional[int] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(_lowercase , _lowercase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowercase , _lowercase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_lowercase )
model.push_to_hub(_lowercase , max_shard_size="""2GB""" )
tokenizer.push_to_hub(_lowercase )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
a : Any = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 672 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
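if __name__ == "__main__":
    # Hedged usage sketch: FNetConfig is the public transformers name for the
    # class defined above; the override values and save path are illustrative.
    from transformers import FNetConfig

    demo_config = FNetConfig(vocab_size=32000, use_tpu_fourier_optimizations=False)
    demo_config.save_pretrained("./fnet-config-demo")  # writes config.json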
| 672 | 1 |
'''simple docstring'''
import pprint
import requests
a : Optional[Any] = """https://zenquotes.io/api"""
def __lowerCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __lowerCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a : str = random_quotes()
pprint.pprint(response)
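# Illustrative formatter for the payload; the "q" (quote) and "a" (author)
# field names are assumptions based on the zenquotes API responses.
def _format_quote(payload: list) -> str:
    first = payload[0]
    return f"\"{first['q']}\" - {first['a']}"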
| 672 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase : Stack[int] = Stack()
UpperCAmelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowercase )
elif i == ")":
# RULE 4
UpperCAmelCase : List[Any] = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : List[Any] = operators[opr](_lowercase , _lowercase )
operand_stack.push(_lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
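# Worked trace for a smaller input, "(2 + (3 * 4))", under the same four rules:
#   push 2, '+', 3, '*', 4; the first ')' pops '*', 4, 3 and pushes 12;
#   the second ')' pops '+', 12, 2 and pushes 14.
# A plain-list sketch of the same idea (illustrative, independent of Stack):
def _two_stack_eval(expr: str) -> int:
    ops = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, operators = [], []
    for ch in expr:
        if ch.isdigit():
            operands.append(int(ch))  # RULE 1
        elif ch in ops:
            operators.append(ch)  # RULE 2
        elif ch == ")":
            right, left = operands.pop(), operands.pop()  # RULE 4
            operands.append(ops[operators.pop()](left, right))
    return operands[-1]  # RULE 5

assert _two_stack_eval("(2 + (3 * 4))") == 14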
| 672 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[int] = {"""vocab_file""": """spiece.model"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a : Dict = {
"""t5-small""": 5_1_2,
"""t5-base""": 5_1_2,
"""t5-large""": 5_1_2,
"""t5-3b""": 5_1_2,
"""t5-11b""": 5_1_2,
}
a : List[str] = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self , A , A="</s>" , A="<unk>" , A="<pad>" , A=100 , A=None , A = None , A=True , **A , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase : List[str] = [f'''<extra_id_{i}>''' for i in range(A )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase : int = len(set(filter(lambda A : bool("""extra_id""" in str(A ) ) , A ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
UpperCAmelCase : Optional[int] = legacy
UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A , unk_token=A , pad_token=A , extra_ids=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , legacy=A , **A , )
UpperCAmelCase : Optional[Any] = vocab_file
UpperCAmelCase : int = extra_ids
UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@staticmethod
def _lowercase( A , A , A ) -> List[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCAmelCase : Optional[int] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , A , )
return max_model_length
@property
def _lowercase( self ) -> Optional[int]:
return self.sp_model.get_piece_size() + self._extra_ids
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(A )) + [1]
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def _lowercase( self ) -> int:
return list(
set(filter(lambda A : bool(re.search(r"""<extra_id_\d+>""" , A ) ) is not None , self.additional_special_tokens ) ) )
def _lowercase( self ) -> Optional[int]:
return [self._convert_token_to_id(A ) for token in self.get_sentinel_tokens()]
def _lowercase( self , A ) -> List[int]:
if len(A ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : int = self._add_eos_if_not_present(A )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase : str = self._add_eos_if_not_present(A )
return token_ids_a + token_ids_a
def __getstate__( self ) -> Tuple:
UpperCAmelCase : List[str] = self.__dict__.copy()
UpperCAmelCase : Optional[int] = None
return state
def __setstate__( self , A ) -> Optional[Any]:
UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : List[Any] = {}
UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A , **A ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCAmelCase : Union[str, Any] = SPIECE_UNDERLINE + text.replace(A , """ """ )
return super().tokenize(A , **A )
def _lowercase( self , A , **A ) -> Optional[Any]:
if not self.legacy:
UpperCAmelCase : List[str] = text.startswith(A )
if is_first:
UpperCAmelCase : List[str] = text[1:]
UpperCAmelCase : Union[str, Any] = self.sp_model.encode(A , out_type=A )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(A ):
UpperCAmelCase : Optional[Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _lowercase( self , A ) -> List[Any]:
if token.startswith("""<extra_id_""" ):
UpperCAmelCase : Optional[Any] = re.match(r"""<extra_id_(\d+)>""" , A )
UpperCAmelCase : List[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(A )
def _lowercase( self , A ) -> str:
if index < self.sp_model.get_piece_size():
UpperCAmelCase : Tuple = self.sp_model.IdToPiece(A )
else:
UpperCAmelCase : Optional[int] = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[int] = """"""
UpperCAmelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
UpperCAmelCase : Dict = True
UpperCAmelCase : str = []
else:
current_sub_tokens.append(A )
UpperCAmelCase : List[Any] = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Union[str, Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
UpperCAmelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
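if __name__ == "__main__":
    # Hedged usage sketch of the sentinel-token handling implemented above.
    # T5Tokenizer is the public transformers name for this class; downloading
    # the "t5-small" files is assumed to be acceptable here.
    from transformers import T5Tokenizer

    demo_tok = T5Tokenizer.from_pretrained("t5-small")
    demo_ids = demo_tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
    # Per _convert_token_to_id above, sentinels sit at the top of the
    # vocabulary: <extra_id_0> -> vocab_size - 1, <extra_id_1> -> vocab_size - 2.
    print(demo_ids)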
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
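# Toy illustration of the split performed above: a fused (3*d, d) in-projection
# weight is sliced into query/key/value blocks of d rows each (d = 256 matches
# the hidden size assumed by the slices; the random tensor is illustrative).
def _demo_qkv_split():
    d = 2_5_6
    fused_weight = torch.randn(3 * d, d)
    q_w, k_w, v_w = fused_weight[:d, :], fused_weight[d : 2 * d, :], fused_weight[-d:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)
    return q_w, k_w, v_w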
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> List[Any]:
UpperCAmelCase : Dict = 1_0
UpperCAmelCase : Union[str, Any] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
UpperCAmelCase : List[Any] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(_lowercase ) ),
} , features=_lowercase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_lowercase )
return filename
# FILE_CONTENT + files
a : int = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
UpperCAmelCase : int = FILE_CONTENT
with open(_lowercase , """w""" ) as f:
f.write(_lowercase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
import bza
UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
UpperCAmelCase : Union[str, Any] = bytes(_lowercase , """utf-8""" )
with bza.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
import gzip
UpperCAmelCase : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
UpperCAmelCase : Optional[int] = bytes(_lowercase , """utf-8""" )
with gzip.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
UpperCAmelCase : Optional[Any] = bytes(_lowercase , """utf-8""" )
with lza.frame.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_lowercase , """w""" ) as archive:
archive.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
import tarfile
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_lowercase , """w""" ) as f:
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
import lzma
UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
UpperCAmelCase : int = bytes(_lowercase , """utf-8""" )
with lzma.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
import zipfile
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
UpperCAmelCase : Union[str, Any] = bytes(_lowercase , """utf-8""" )
with zstd.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
UpperCAmelCase : Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_lowercase , """w""" ) as f:
f.write(_lowercase )
return filename
a : Any = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
a : Dict = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
a : Optional[int] = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
a : int = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
a : Optional[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> Any:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = datasets.Dataset.from_dict(_lowercase )
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(_lowercase ) ) as con:
UpperCAmelCase : str = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_lowercase , """w""" , newline="""""" ) as f:
UpperCAmelCase : Optional[Any] = csv.DictWriter(_lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_lowercase , """w""" , newline="""""" ) as f:
UpperCAmelCase : Tuple = csv.DictWriter(_lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
import bza
UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_lowercase , """rb""" ) as f:
UpperCAmelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(_lowercase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
UpperCAmelCase : Union[str, Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_lowercase , """wb""" ) as f:
UpperCAmelCase : int = pq.ParquetWriter(_lowercase , schema=_lowercase )
UpperCAmelCase : int = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowercase ) )] for k in DATA[0]} , schema=_lowercase )
writer.write_table(_lowercase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
UpperCAmelCase : Dict = {"""data""": DATA}
with open(_lowercase , """w""" ) as f:
json.dump(_lowercase , _lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
UpperCAmelCase : Tuple = {"""data""": DATA_DICT_OF_LISTS}
with open(_lowercase , """w""" ) as f:
json.dump(_lowercase , _lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(_lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(_lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(_lowercase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(_lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(_lowercase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
import gzip
UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(_lowercase , """rb""" ) as orig_file:
with gzip.open(_lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
import gzip
UpperCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(_lowercase , """rb""" ) as orig_file:
with gzip.open(_lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.join("""nested""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int:
UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_lowercase , """w""" ) as f:
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_lowercase , """w""" ) as f:
f.add(_lowercase , arcname=os.path.join("""nested""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : List[str] = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(_lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : List[str] = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(_lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Optional[int] = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(_lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(_lowercase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> List[str]:
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> Any:
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
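# A minimal usage sketch (not part of the original fixtures): pytest injects a
# session-scoped fixture by its function name. `text_path` is a hypothetical name
# standing in for one of the anonymized plain-text fixtures defined above.
def test_can_read_text_fixture(text_path):
    with open(text_path, encoding="utf-8") as f:
        assert f.read().splitlines() == ["0", "1", "2", "3"]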
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a : int = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , A , )
super().__init__(*A , **A )
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
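# Hedged note (not part of the original module): BARThez follows the RoBERTa-style
# special-token scheme implemented by build_inputs_with_special_tokens above, so a
# single sequence is wrapped as <s> A </s> and a pair as <s> A </s></s> B </s>.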
| 672 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ ):
lowercase = DistilBertTokenizer
lowercase = DistilBertTokenizerFast
lowercase = True
@slow
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase(ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float) -> np.ndarray:
    # Heun's method (explicit trapezoidal rule): a forward-Euler predictor followed
    # by a corrector that averages the slopes at both ends of the step.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: forward Euler step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # corrector: trapezoidal average of the slopes
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
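    # Illustrative sketch (not from the original file): integrate dy/dx = y from
    # x = 0 to x = 1; the last entry should be close to e = 2.71828...
    print(__lowerCamelCase(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)[-1])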
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> str:
        # For consistency across the different places where DisjunctiveConstraint is
        # called, dc.token_ids is a list of integers. It is also initialized only with integers.
UpperCAmelCase : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase : List[Any] = DisjunctiveConstraint(A )
self.assertTrue(isinstance(dc.token_ids , A ) )
with self.assertRaises(A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _lowercase( self ) -> Dict:
        # We can't have constraints that are complete subsets of one another. That would lead to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(A ):
DisjunctiveConstraint(A ) # fails here
def _lowercase( self ) -> Tuple:
UpperCAmelCase : List[Any] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase : List[Any] = DisjunctiveConstraint(A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = dc.update(1 )
UpperCAmelCase : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(2 )
UpperCAmelCase : List[str] = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = dc.update(3 )
UpperCAmelCase : str = stepped is True and completed is True and reset is False
self.assertTrue(A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase : str = DisjunctiveConstraint(A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
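# Hedged usage sketch (not part of the original tests): in practice a
# DisjunctiveConstraint is passed to `model.generate`, which forces the output to
# contain one of the alternative token sequences via constrained beam search.
# The checkpoint name below is illustrative.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    alternatives = tok(["rain", "raining"], add_special_tokens=False).input_ids
    inputs = tok("translate English to German: the weather is wet", return_tensors="pt")
    out = model.generate(**inputs, constraints=[DisjunctiveConstraint(alternatives)], num_beams=4)
    print(tok.batch_decode(out, skip_special_tokens=True))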
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
                    # Tokenizers might complain that we want to truncate to a value
                    # that is not even reached by the input. In that case we don't
                    # want to truncate at all; there does not seem to be a better
                    # way to catch that exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
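# Hedged usage sketch (not part of the original module): this class backs
# `pipeline("zero-shot-classification")`. The checkpoint name is illustrative.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "The new GPU doubles training throughput.",
        candidate_labels=["technology", "politics", "sports"],
    )
    print(result["labels"][0], round(result["scores"][0], 3))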
| 672 | 1 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCamelCase_ ( pl.LightningModule ):
def __init__( self , A ) -> Dict:
super().__init__()
UpperCAmelCase : str = model
UpperCAmelCase : Any = 2
UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowercase( self ) -> int:
pass
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
# load longformer model from model identifier
UpperCAmelCase : List[Any] = LongformerModel.from_pretrained(_lowercase )
UpperCAmelCase : Optional[Any] = LightningModel(_lowercase )
UpperCAmelCase : Any = torch.load(_lowercase , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
UpperCAmelCase : int = LongformerForQuestionAnswering.from_pretrained(_lowercase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_lowercase )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : str = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
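# Hedged invocation sketch (not part of the original script); the script filename,
# checkpoint path, and output directory below are illustrative:
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=3.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa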
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
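# Hedged note (not part of the original tests): Pegasus tokenizers reserve the first
# `offset` ids for special/mask tokens (103 in the google/pegasus-large tests, 0 in
# the BigBird-Pegasus tests above), so a raw SentencePiece id i surfaces as
# i + offset, while ids 0 and 1 stay <pad> and </s>.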
| 672 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    # fully opaque core with a linear alpha ramp over the overlap region
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):  # the original shadows the builtin names min/max
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def __lowerCamelCase(n, d):
    # round n down to the nearest multiple of d
    divisor = n % d
    return n - divisor
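# Quick check (not in the original file): the helper above floors n to a multiple of
# d, e.g. __lowerCamelCase(71, 32) == 64, which keeps tile sizes grid-aligned.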
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
def __init__( self , A , A , A , A , A , A , A = 350 , ) -> List[Any]:
super().__init__(
vae=A , text_encoder=A , tokenizer=A , unet=A , low_res_scheduler=A , scheduler=A , max_noise_level=A , )
def _lowercase( self , A , A , A , A , A , A , A , **A ) -> Tuple:
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCAmelCase : Optional[int] = add_overlap_rect(A , A , image.size )
UpperCAmelCase : Any = image.crop(A )
UpperCAmelCase : int = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase : int = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase : List[Any] = max(0 , A )
UpperCAmelCase : str = squeeze_tile(A , A , A , A )
UpperCAmelCase : Optional[int] = to_input.size
UpperCAmelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCAmelCase : Dict = super(A , self ).__call__(image=A , **A ).images[0]
UpperCAmelCase : Optional[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCAmelCase : str = unsqueeze_tile(A , A )
UpperCAmelCase : List[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCAmelCase : List[Any] = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
UpperCAmelCase : Optional[int] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=A ) , mode="""L""" , )
final_image.paste(
A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , A )
@torch.no_grad()
def __call__( self , A , A , A = 75 , A = 9.0 , A = 50 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = None , A = 1 , A = 128 , A = 32 , A = 32 , ) -> List[str]:
UpperCAmelCase : List[Any] = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase : Any = math.ceil(image.size[0] / tile_size )
UpperCAmelCase : Any = math.ceil(image.size[1] / tile_size )
UpperCAmelCase : Tuple = tcx * tcy
UpperCAmelCase : Optional[int] = 0
for y in range(A ):
for x in range(A ):
self._process_tile(
A , A , A , A , A , A , A , prompt=A , num_inference_steps=A , guidance_scale=A , noise_level=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 672 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 672 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self ) -> Dict:
UpperCAmelCase : int = tempfile.mkdtemp()
UpperCAmelCase : Union[str, Any] = 8
# DPR tok
UpperCAmelCase : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(A , exist_ok=A )
UpperCAmelCase : str = os.path.join(A , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase : Any = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : Optional[Any] = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(A , exist_ok=A )
UpperCAmelCase : Tuple = os.path.join(A , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[Any] = os.path.join(A , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def _lowercase( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def _lowercase( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def _lowercase( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
UpperCAmelCase : List[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCAmelCase : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(A )
rag_tokenizer.save_pretrained(A )
UpperCAmelCase : Optional[int] = RagTokenizer.from_pretrained(A , config=A )
self.assertIsInstance(new_rag_tokenizer.question_encoder , A )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , A )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def _lowercase( self ) -> str:
UpperCAmelCase : Optional[Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
UpperCAmelCase : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCAmelCase : List[str] = tokenizer(A )
self.assertIsNotNone(A )
@slow
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
UpperCAmelCase : List[str] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCAmelCase : int = tokenizer(A )
self.assertIsNotNone(A )
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate a higher probability that the datasets are uncorrelated.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 | 1 |
'''simple docstring'''
a : int = tuple[float, float, float]
a : Optional[int] = tuple[float, float, float]
def __lowerCamelCase ( _lowercase , _lowercase ) -> Vectorad:
UpperCAmelCase : Any = end_pointa[0] - end_pointa[0]
UpperCAmelCase : List[Any] = end_pointa[1] - end_pointa[1]
UpperCAmelCase : Any = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Vectorad:
UpperCAmelCase : Optional[int] = ab[1] * ac[2] - ab[2] * ac[1] # *i
UpperCAmelCase : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
UpperCAmelCase : Any = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowerCamelCase ( _lowercase , _lowercase ) -> bool:
    return tuple(round(x , _lowercase ) for x in vector ) == (0, 0, 0)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase = 1_0 ) -> bool:
UpperCAmelCase : int = create_vector(_lowercase , _lowercase )
UpperCAmelCase : Union[str, Any] = create_vector(_lowercase , _lowercase )
return is_zero_vector(get_ad_vectors_cross(_lowercase , _lowercase ) , _lowercase )
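# Minimal self-contained sketch of the same check with descriptive
# (hypothetical) names, since the helpers above are anonymised: three 3D
# points are collinear exactly when the cross product AB x AC is the zero
# vector. A tolerance is used here instead of the rounding-based check above.
def _points_are_collinear(a, b, c, eps=1e-9):
    ab = tuple(q - p for p, q in zip(a, b))
    ac = tuple(q - p for p, q in zip(a, c))
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        ab[2] * ac[0] - ab[0] * ac[2],
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return all(abs(component) < eps for component in cross)

assert _points_are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not _points_are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))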
| 672 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
    UpperCAmelCase : List[Any] = [
        int(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
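# Worked toy example of the Quine-McCluskey flow above (hand-checked): with
# 3 variables and minterms {0, 1, 2, 5} the binary forms are 000, 001, 010,
# 101. Merging pairs that differ in exactly one bit yields the prime
# implicants 00_, 0_0 and _01 ('_' marks the eliminated variable); 010 is
# covered only by 0_0 and 101 only by _01, so those two are the essential
# prime implicants, and together they already cover all four minterms.
def _minterm_to_binary(minterm: int, no_of_variables: int) -> str:
    # Hypothetical descriptive re-spelling of the decimal-to-binary step above.
    return format(minterm, f"0{no_of_variables}b")

assert _minterm_to_binary(5, 3) == "101"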
| 672 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase_ ( unittest.TestCase ):
@property
def _lowercase( self ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase( self ) -> Dict:
UpperCAmelCase : Tuple = self.dummy_uncond_unet
UpperCAmelCase : Dict = ScoreSdeVeScheduler()
UpperCAmelCase : int = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=A ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Dict = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=A , return_dict=A )[
0
]
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
UpperCAmelCase : List[str] = """google/ncsnpp-church-256"""
UpperCAmelCase : Dict = UNetaDModel.from_pretrained(A )
UpperCAmelCase : List[str] = ScoreSdeVeScheduler.from_pretrained(A )
UpperCAmelCase : Any = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=A ).images
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
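# Illustrative trace of the digit-square chains above (hand-checked):
#   44 -> 16 + 16 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1            (ends at 1)
#   85 -> 64 + 25 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89  (cycle)
def _next_in_chain(n: int) -> int:
    # Hypothetical descriptive re-spelling of the digit-square step above.
    return sum(int(digit) ** 2 for digit in str(n))

assert _next_in_chain(44) == 32 and _next_in_chain(85) == 89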
| 672 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class UpperCamelCase_ :
lowercase = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase = field(
default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase = field(
default=__magic_name__ , metadata={'help': 'The column name of the images in the files.'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'A folder containing the training data.'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'A folder containing the validation data.'} )
lowercase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[Any] = {}
if self.train_dir is not None:
UpperCAmelCase : List[Any] = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase : Tuple = self.validation_dir
UpperCAmelCase : Tuple = data_files if data_files else None
@dataclass
class UpperCamelCase_ :
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowercase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase = field(default=__magic_name__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class UpperCamelCase_ ( __magic_name__ ):
lowercase = field(
default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def __lowerCamelCase ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase : Dict = training_args.get_process_log_level()
logger.setLevel(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0:
UpperCAmelCase : Optional[int] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCAmelCase : Union[str, Any] = split["""train"""]
UpperCAmelCase : List[Any] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase : Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCAmelCase : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCAmelCase : Tuple = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCAmelCase : Optional[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCAmelCase : int = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase : Dict = ViTMAEForPreTraining(_lowercase )
if training_args.do_train:
UpperCAmelCase : List[Any] = ds["""train"""].column_names
else:
UpperCAmelCase : Optional[int] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCAmelCase : Tuple = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase : int = """image"""
elif "img" in column_names:
UpperCAmelCase : Tuple = """img"""
else:
UpperCAmelCase : Union[str, Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCAmelCase : Union[str, Any] = image_processor.size["""shortest_edge"""]
else:
UpperCAmelCase : Tuple = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCAmelCase : Tuple = Compose(
[
Lambda(lambda _lowercase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(_lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_lowercase ):
UpperCAmelCase : Any = [transforms(_lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCAmelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCAmelCase : str = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_lowercase )
# Compute absolute learning rate
UpperCAmelCase : Dict = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCAmelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 2_5_6
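        # Linear-scaling illustration (numbers are hypothetical): a per-device
        # batch of 32 with gradient accumulation 4 on 2 processes gives
        # total_train_batch_size = 32 * 4 * 2 = 256, so absolute_lr == base_lr;
        # a total batch of 512 doubles it, following the MAE recipe
        # absolute_lr = base_lr * total_batch_size / 256.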
# Initialize our trainer
UpperCAmelCase : Union[str, Any] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase : int = last_checkpoint
UpperCAmelCase : Tuple = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("""eval""" , _lowercase )
trainer.save_metrics("""eval""" , _lowercase )
# Write model card and (optionally) push to hub
UpperCAmelCase : Dict = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def __lowerCamelCase ( _lowercase ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
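# Rough sketch of what the _LazyModule assignment above provides (simplified;
# the real implementation lives in transformers.utils): attribute lookups on
# the package are resolved through _import_structure on first access, so e.g.
#
#   from transformers.models.bert import BertModel
#
# imports the torch-backed modeling code only at that point, keeping the
# top-level `import transformers` cheap when optional backends are missing.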
| 672 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a : str = logging.getLogger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'summarization'
lowercase = ['loss']
lowercase = ROUGE_KEYS
lowercase = 'rouge2'
def __init__( self , A , **A ) -> int:
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase : List[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(A , num_labels=A , mode=self.mode , **A )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
UpperCAmelCase : Dict = Path(self.output_dir ) / """metrics.json"""
UpperCAmelCase : Dict = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
UpperCAmelCase : int = 0
UpperCAmelCase : str = defaultdict(A )
UpperCAmelCase : List[str] = self.config.model_type
UpperCAmelCase : Any = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
UpperCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase : List[str] = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
UpperCAmelCase : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase : Union[str, Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase : Optional[Any] = get_git_info()["""repo_sha"""]
UpperCAmelCase : Union[str, Any] = hparams.num_workers
UpperCAmelCase : List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , A ):
UpperCAmelCase : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase : Dict = self.decoder_start_token_id
UpperCAmelCase : Any = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase : int = self.hparams.eval_max_gen_length
else:
UpperCAmelCase : Dict = self.model.config.max_length
UpperCAmelCase : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _lowercase( self , A ) -> Dict[str, List[str]]:
UpperCAmelCase : Dict = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(A , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
UpperCAmelCase : Tuple = True
return readable_batch
def _lowercase( self , A , **A ) -> int:
return self.model(A , **A )
def _lowercase( self , A ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.tokenizer.batch_decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A )
return lmap(str.strip , A )
def _lowercase( self , A ) -> Tuple:
UpperCAmelCase : Optional[int] = self.tokenizer.pad_token_id
UpperCAmelCase , UpperCAmelCase : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
UpperCAmelCase : Union[str, Any] = batch["""labels"""]
if isinstance(self.model , A ):
UpperCAmelCase : Any = self.model._shift_right(A )
else:
UpperCAmelCase : int = shift_tokens_right(A , A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase : List[str] = decoder_input_ids
self.save_readable_batch(A )
UpperCAmelCase : Optional[Any] = self(A , attention_mask=A , decoder_input_ids=A , use_cache=A )
UpperCAmelCase : Any = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase : Optional[int] = nn.CrossEntropyLoss(ignore_index=A )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase : int = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCAmelCase : str = nn.functional.log_softmax(A , dim=-1 )
UpperCAmelCase , UpperCAmelCase : Tuple = label_smoothed_nll_loss(
A , A , self.hparams.label_smoothing , ignore_index=A )
return (loss,)
@property
def _lowercase( self ) -> int:
return self.tokenizer.pad_token_id
def _lowercase( self , A , A ) -> Dict:
UpperCAmelCase : List[Any] = self._step(A )
UpperCAmelCase : Any = dict(zip(self.loss_names , A ) )
# tokens per batch
UpperCAmelCase : List[str] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
UpperCAmelCase : Any = batch["""input_ids"""].shape[0]
UpperCAmelCase : Optional[int] = batch["""input_ids"""].eq(self.pad ).sum()
UpperCAmelCase : str = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _lowercase( self , A , A ) -> Dict:
return self._generative_step(A )
def _lowercase( self , A , A="val" ) -> Dict:
self.step_count += 1
UpperCAmelCase : List[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase : Union[str, Any] = losses["""loss"""]
UpperCAmelCase : str = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
UpperCAmelCase : Dict = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase : torch.FloatTensor = torch.tensor(A ).type_as(A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(A )
UpperCAmelCase : str = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCAmelCase : Union[str, Any] = self.step_count
self.metrics[prefix].append(A ) # callback writes this to self.metrics_save_path
UpperCAmelCase : List[Any] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _lowercase( self , A , A ) -> Dict:
return calculate_rouge(A , A )
def _lowercase( self , A ) -> dict:
UpperCAmelCase : Union[str, Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase : Tuple = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase : str = (time.time() - ta) / batch["""input_ids"""].shape[0]
UpperCAmelCase : List[str] = self.ids_to_clean_text(A )
UpperCAmelCase : List[str] = self.ids_to_clean_text(batch["""labels"""] )
UpperCAmelCase : Optional[Any] = self._step(A )
UpperCAmelCase : Any = dict(zip(self.loss_names , A ) )
UpperCAmelCase : Dict = self.calc_generative_metrics(A , A )
UpperCAmelCase : Any = np.mean(lmap(A , A ) )
base_metrics.update(gen_time=A , gen_len=A , preds=A , target=A , **A )
return base_metrics
def _lowercase( self , A , A ) -> Dict:
return self._generative_step(A )
def _lowercase( self , A ) -> List[str]:
return self.validation_epoch_end(A , prefix="""test""" )
def _lowercase( self , A ) -> SeqaSeqDataset:
UpperCAmelCase : str = self.n_obs[type_path]
UpperCAmelCase : str = self.target_lens[type_path]
UpperCAmelCase : str = self.dataset_class(
self.tokenizer , type_path=A , n_obs=A , max_target_length=A , **self.dataset_kwargs , )
return dataset
def _lowercase( self , A , A , A = False ) -> DataLoader:
UpperCAmelCase : Tuple = self.get_dataset(A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase : List[Any] = dataset.make_sortish_sampler(A , distributed=self.hparams.gpus > 1 )
return DataLoader(
A , batch_size=A , collate_fn=dataset.collate_fn , shuffle=A , num_workers=self.num_workers , sampler=A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase : List[str] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
A , batch_sampler=A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
A , batch_size=A , collate_fn=dataset.collate_fn , shuffle=A , num_workers=self.num_workers , sampler=A , )
def _lowercase( self ) -> DataLoader:
UpperCAmelCase : Optional[Any] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=A )
return dataloader
def _lowercase( self ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def _lowercase( self ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _lowercase( A , A ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(A , A )
add_generic_args(A , A )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=A )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=A )
parser.add_argument("""--max_tokens_per_batch""" , type=A , default=A )
parser.add_argument("""--logger_name""" , type=A , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=A , default=-1 , required=A , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=A , default=500 , required=A , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=A , default=-1 , required=A , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=A , default="""summarization""" , required=A , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=A , default=0.0 , required=A )
parser.add_argument("""--src_lang""" , type=A , default="""""" , required=A )
parser.add_argument("""--tgt_lang""" , type=A , default="""""" , required=A )
parser.add_argument("""--eval_beams""" , type=A , default=A , required=A )
parser.add_argument(
"""--val_metric""" , type=A , default=A , required=A , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=A , default=A , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=A , default=1 , required=A , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=A , default=-1 , required=A , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'translation'
lowercase = ['loss']
lowercase = ['bleu']
lowercase = 'bleu'
def __init__( self , A , **A ) -> List[str]:
super().__init__(A , **A )
UpperCAmelCase : Dict = hparams.src_lang
UpperCAmelCase : Dict = hparams.tgt_lang
def _lowercase( self , A , A ) -> dict:
return calculate_bleu(A , A )
def __lowerCamelCase ( _lowercase , _lowercase=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=_lowercase )
check_output_dir(_lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase : SummarizationModule = SummarizationModule(_lowercase )
else:
UpperCAmelCase : SummarizationModule = TranslationModule(_lowercase )
UpperCAmelCase : Tuple = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
UpperCAmelCase : Optional[int] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase : Any = os.environ.get("""WANDB_PROJECT""" , _lowercase )
UpperCAmelCase : Tuple = WandbLogger(name=model.output_dir.name , project=_lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase : List[str] = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
UpperCAmelCase : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase : Any = False
UpperCAmelCase : int = args.val_metric == """loss"""
UpperCAmelCase : pl.Trainer = generic_train(
_lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
UpperCAmelCase : Tuple = """"""
UpperCAmelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_lowercase ) )
if checkpoints:
UpperCAmelCase : List[Any] = checkpoints[-1]
UpperCAmelCase : Optional[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
a : List[str] = pl.Trainer.add_argparse_args(parser)
a : str = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a : int = parser.parse_args()
main(args)
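# Hedged sketch of the label-smoothed loss used in _step above: a simplified
# stand-in for utils.label_smoothed_nll_loss (the helper name and shapes here
# are assumptions).
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities, target: (N,) gold token ids.
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.unsqueeze(-1).eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    # Interpolate between the hard NLL term and the uniform smoothing term.
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss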
| 672 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
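# Numeric illustration of the acceptance rule above (made-up values): a move
# that worsens the score by 2 is accepted with probability e^(-2/100) ~ 0.98
# at temperature 100, but only with probability e^(-2/1) ~ 0.14 at
# temperature 1; cooling makes the search increasingly greedy.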
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 672 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : int = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
UpperCAmelCase : Optional[int] = b.T
UpperCAmelCase : Tuple = np.sum(np.square(_lowercase ) , axis=1 )
UpperCAmelCase : Union[str, Any] = np.sum(np.square(_lowercase ) , axis=0 )
UpperCAmelCase : int = np.matmul(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
UpperCAmelCase : Dict = x.reshape(-1 , 3 )
UpperCAmelCase : Union[str, Any] = squared_euclidean_distance(_lowercase , _lowercase )
return np.argmin(_lowercase , axis=1 )
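# Hedged toy check of the two helpers above (the function name and the values
# are made up): each RGB pixel is assigned the index of its closest palette
# colour under the same |a|^2 - 2ab + |b|^2 distance expansion.
def _nearest_cluster_demo():
    palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.int64)
    pixels = np.array([[10, 10, 10], [250, 250, 250]], dtype=np.int64)
    d = (
        np.sum(pixels**2, axis=1)[:, None]
        - 2 * pixels @ palette.T
        + np.sum(palette**2, axis=1)[None, :]
    )
    return np.argmin(d, axis=1)  # -> array([0, 1]): dark pixel, bright pixel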
class UpperCamelCase_ ( __magic_name__ ):
lowercase = ['pixel_values']
def __init__( self , A = None , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = True , **A , ) -> None:
super().__init__(**A )
UpperCAmelCase : Optional[Any] = size if size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase : int = get_size_dict(A )
UpperCAmelCase : Union[str, Any] = np.array(A ) if clusters is not None else None
UpperCAmelCase : Optional[Any] = do_resize
UpperCAmelCase : str = size
UpperCAmelCase : Dict = resample
UpperCAmelCase : Union[str, Any] = do_normalize
UpperCAmelCase : Any = do_color_quantize
def _lowercase( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> np.ndarray:
UpperCAmelCase : int = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def _lowercase( self , A , A = None , ) -> np.ndarray:
UpperCAmelCase : Union[str, Any] = rescale(image=A , scale=1 / 1_2_7.5 , data_format=A )
UpperCAmelCase : Optional[Any] = image - 1
return image
def _lowercase( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
UpperCAmelCase : int = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Dict = size if size is not None else self.size
UpperCAmelCase : Optional[int] = get_size_dict(A )
UpperCAmelCase : Dict = resample if resample is not None else self.resample
UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : List[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCAmelCase : str = clusters if clusters is not None else self.clusters
UpperCAmelCase : str = np.array(A )
UpperCAmelCase : Dict = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
UpperCAmelCase : Tuple = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_normalize:
UpperCAmelCase : Any = [self.normalize(image=A ) for image in images]
if do_color_quantize:
UpperCAmelCase : Optional[int] = [to_channel_dimension_format(A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCAmelCase : int = np.array(A )
UpperCAmelCase : Any = color_quantize(A , A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCAmelCase : List[str] = images.shape[0]
UpperCAmelCase : Optional[int] = images.reshape(A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCAmelCase : int = list(A )
else:
UpperCAmelCase : Any = [to_channel_dimension_format(A , A ) for image in images]
UpperCAmelCase : Union[str, Any] = {"""input_ids""": images}
return BatchFeature(data=A , tensor_type=A )
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
image=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 1 |
'''simple docstring'''
from itertools import product
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[int]:
UpperCAmelCase : Any = sides_number
UpperCAmelCase : List[Any] = max_face_number * dice_number
UpperCAmelCase : int = [0] * (max_total + 1)
UpperCAmelCase : str = 1
UpperCAmelCase : Any = range(_lowercase , max_face_number + 1 )
for dice_numbers in product(_lowercase , repeat=_lowercase ):
UpperCAmelCase : Union[str, Any] = sum(_lowercase )
totals_frequencies[total] += 1
return totals_frequencies
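# Toy check of the frequency helper above: two 2-sided dice produce the totals
# 2, 3, 3, 4, so the returned distribution (indexed by total) is
# [0, 0, 1, 2, 1].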
def __lowerCamelCase ( ) -> float:
UpperCAmelCase : str = total_frequency_distribution(
sides_number=4 , dice_number=9 )
UpperCAmelCase : Optional[int] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : List[Any] = 9
UpperCAmelCase : Dict = 4 * 9
UpperCAmelCase : Dict = 6
for peter_total in range(_lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
UpperCAmelCase : str = (4**9) * (6**6)
UpperCAmelCase : Optional[Any] = peter_wins_count / total_games_number
UpperCAmelCase : Tuple = round(_lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 672 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger : Any = get_logger()
DEVICE_MAPPING : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , A=None , A=None , **A ) -> str:
super().__init__(features=A )
import jax
from jaxlib.xla_client import Device
if isinstance(A , Device ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
UpperCAmelCase : Optional[int] = device if isinstance(device , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase : List[Any] = str(jax.devices()[0] )
UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(None )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase : List[str] = {}
if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
UpperCAmelCase : str = {"""dtype""": jnp.int64}
else:
UpperCAmelCase : int = {"""dtype""": jnp.int32}
elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase : Any = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
| 672 | 1 |
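The dtype branch inside the formatter above reduces to one rule: integer arrays follow JAX's x64 flag, float arrays default to single precision. A standalone sketch under standard JAX; the helper name is hypothetical.

import jax
import jax.numpy as jnp
import numpy as np

def pick_default_dtype(value: np.ndarray) -> dict:
    if np.issubdtype(value.dtype, np.integer):
        # 64-bit ints only when jax_enable_x64 is set, otherwise 32-bit
        return {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
    if np.issubdtype(value.dtype, np.floating):
        # JAX defaults floats to single precision
        return {"dtype": jnp.float32}
    return {}

print(pick_default_dtype(np.arange(3)))  # int32 by default, int64 under x64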
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
prob : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
local_min : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
prob : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
local_min : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
prob : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
local_min : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
F'''{local_min.score()}'''
)
prob : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
local_min : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
F'''{local_min.score()}'''
)
| 672 |
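The branch in the middle of the loop above is the standard Metropolis acceptance rule: an improving neighbor is always taken, and a worsening one is taken with probability e^(change/T), which shrinks as the temperature cools. Isolated as a sketch, with my own names:

import math
import random

def accept_move(change: float, temperature: float) -> bool:
    if change > 0:  # improvement: always accept
        return True
    # worse move: accept with probability exp(change / T) <= 1
    return random.random() < math.e ** (change / temperature)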
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCamelCase ( ) -> Dict:
g = Github(os.environ["""GITHUB_TOKEN"""] )
repo = g.get_repo("""huggingface/transformers""" )
open_issues = repo.get_issues(state="""open""" )
for issue in open_issues:
comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 1 |
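The two branches above encode a single staleness policy: warn after 23 quiet days, close 7 days after the bot's warning, and in both cases only touch issues at least 30 days old that carry no exempt label. The shared date predicate, pulled out as a sketch with hypothetical names:

from datetime import datetime

def is_quiet(updated_at: datetime, created_at: datetime, quiet_days: int) -> bool:
    now = datetime.utcnow()
    return (now - updated_at).days > quiet_days and (now - created_at).days >= 30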
'''simple docstring'''
import cmath
import math
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> complex:
UpperCAmelCase : List[Any] = math.radians(_lowercase )
UpperCAmelCase : Union[str, Any] = math.radians(_lowercase )
# Convert voltage and current to rectangular form
UpperCAmelCase : Union[str, Any] = cmath.rect(_lowercase , _lowercase )
UpperCAmelCase : Union[str, Any] = cmath.rect(_lowercase , _lowercase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
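A worked check of the formula above. Note that the snippet multiplies the voltage phasor by the current phasor directly, so the phase angles add; it does not conjugate the current.

import cmath
import math

voltage = cmath.rect(100, math.radians(30))  # 100 V at +30 degrees
current = cmath.rect(5, math.radians(-30))   # 5 A at -30 degrees
print(voltage * current)  # ~(500+0j): 500 VA, the angles cancel here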
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 1 |
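The parameterized test at the end feeds a rotary-embedding scaling dict into the model configuration. Assuming the usual transformers convention for this model family, the setting it exercises looks roughly like the sketch below; this is not the test's literal code.

# linear scaling stretches every position index by `factor`; dynamic scaling
# leaves short inputs untouched and only rescales once the input grows past
# the original max_position_embeddings, which is what the test asserts
rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": 10.0}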
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=64 , A=2 , A=3 , A=True , A=True , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.0_2 , A=[1, 16, 4, 4] , A=None , ) -> Any:
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : List[Any] = patch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : int = is_training
UpperCAmelCase : Any = use_labels
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Dict = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Optional[int] = scope
UpperCAmelCase : List[Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : Optional[Any] = (self.image_size // 32) ** 2
UpperCAmelCase : List[Any] = num_patches + 1
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> int:
UpperCAmelCase : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : str = ViTHybridModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : int = ViTHybridForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ViTHybridModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _lowercase( self ) -> Any:
pass
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = _config_zero_init(A )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(config=A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : str = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _lowercase( self ) -> str:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[str] = ViTHybridModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Tuple:
UpperCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> Optional[int]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A )
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**A )
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : int = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
@require_accelerate
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Any = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
UpperCAmelCase : Optional[int] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : List[str] = image_processor(images=A , return_tensors="""pt""" )
UpperCAmelCase : Any = model(**A )
UpperCAmelCase : List[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Tuple = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
| 672 |
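The sequence-length comment in the tester above is worth spelling out, since the hybrid model takes its patch count from the backbone's output stride rather than from patch_size:

image_size = 64          # test-time input resolution
backbone_stride = 32     # default BiT output stride
num_patches = (image_size // backbone_stride) ** 2  # = 4
seq_length = num_patches + 1                        # +1 for [CLS], = 5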
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowercase )
return value
| 672 | 1 |
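Readable equivalents of the two helpers above, with a usage check. This simplified next_prime only searches upward and drops the snippet's factor and desc options.

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

def next_prime(value: int) -> int:
    while not is_prime(value):
        value += 1
    return value

print(next_prime(14))  # 17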
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( _lowercase , _lowercase=1_0 ) -> int:
UpperCAmelCase : Optional[int] = []
for _ in range(_lowercase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( _lowercase , _lowercase=1_0 ) -> str:
UpperCAmelCase : Union[str, Any] = []
for step in range(_lowercase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Dict = os.path.join(_lowercase , """schedule.bin""" )
torch.save(scheduler.state_dict() , _lowercase )
UpperCAmelCase : List[Any] = torch.load(_lowercase )
scheduler.load_state_dict(_lowercase )
return lrs
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self , A , A , A ) -> Optional[Any]:
self.assertEqual(len(A ) , len(A ) )
for a, b in zip(A , A ):
self.assertAlmostEqual(A , A , delta=A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : List[str] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : Any = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
UpperCAmelCase : Dict = criterion(A , A )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _lowercase( self ) -> str:
UpperCAmelCase : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A )
UpperCAmelCase : Optional[Any] = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : Any = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=A , weight_decay=0.0 , relative_step=A , scale_parameter=A , warmup_init=A , )
for _ in range(1000 ):
UpperCAmelCase : Union[str, Any] = criterion(A , A )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase = 10
def _lowercase( self , A , A , A , A=None ) -> List[str]:
self.assertEqual(len(A ) , len(A ) )
for a, b in zip(A , A ):
self.assertAlmostEqual(A , A , delta=A , msg=A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : int = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : Optional[int] = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Tuple = data
UpperCAmelCase : List[Any] = scheduler_func(self.optimizer , **A )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[Any] = unwrap_schedule(A , self.num_steps )
self.assertListAlmostEqual(
A , A , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
UpperCAmelCase : Any = scheduler_func(self.optimizer , **A )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(A ) # wrap to test picklability of the schedule
UpperCAmelCase : Dict = unwrap_and_save_reload_schedule(A , self.num_steps )
self.assertListEqual(A , A , msg=f'''failed for {scheduler_func} in save and reload''' )
class UpperCamelCase_ :
def __init__( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[int] = fn
def __call__( self , *A , **A ) -> Optional[int]:
return self.fn(*A , **A )
@classmethod
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Tuple = list(map(self , scheduler.lr_lambdas ) )
| 672 |
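One expected row above can be reproduced by hand: the linear schedule ramps to the peak learning rate over the warmup steps, then decays linearly to zero. A pure-Python sketch of that lambda, no torch required:

def linear_lr(step: int, warmup: int = 2, total: int = 10, peak: float = 10.0) -> float:
    if step < warmup:
        return peak * step / warmup
    return peak * (total - step) / (total - warmup)

print([round(linear_lr(s), 2) for s in range(10)])
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]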
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 1 |
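The middle variant above lost its per-letter bookkeeping to the variable renaming: the 26-entry flag table is allocated but never indexed, so as written the function only records whether any lower- or upper-case character appears at all. The usual table-based version it stands in for looks like this:

def is_pangram_faster(sentence: str = "The quick brown fox jumps over the lazy dog") -> bool:
    seen = [False] * 26
    for char in sentence.lower():
        if "a" <= char <= "z":
            seen[ord(char) - ord("a")] = True
    return all(seen)

print(is_pangram_faster())  # True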
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase_ :
def __init__( self , A = "cpu" , A = "openai/clip-vit-large-patch14" ) -> None:
UpperCAmelCase : Dict = device
UpperCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(A )
UpperCAmelCase : int = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase : Dict = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCAmelCase : Optional[Any] = torchvision.transforms.Resize(224 )
UpperCAmelCase : Optional[Any] = torchvision.transforms.CenterCrop(224 )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = self.resize(A )
UpperCAmelCase : int = self.center_crop(A )
UpperCAmelCase : str = self.normalize(A )
return images
def __call__( self , A=None , A=None , **A ) -> Optional[int]:
UpperCAmelCase : int = self.tokenizer(text=A , **A )
UpperCAmelCase : str = self.preprocess_img(A )
UpperCAmelCase : List[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , A=10 , A=0.0_1 , A=None , A=None , A=None , A=None , A=None , A=None , A=False , A=True , A="image" , A=True , A=False , A=False , A=False , ) -> None:
super().__init__()
UpperCAmelCase : Tuple = None
UpperCAmelCase : Union[str, Any] = device if device else get_device()
if vqgan:
UpperCAmelCase : Tuple = vqgan
else:
UpperCAmelCase : Optional[int] = load_vqgan(self.device , conf_path=A , ckpt_path=A )
self.vqgan.eval()
if clip:
UpperCAmelCase : Tuple = clip
else:
UpperCAmelCase : int = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
UpperCAmelCase : int = ProcessorGradientFlow(device=self.device )
UpperCAmelCase : List[str] = iterations
UpperCAmelCase : List[str] = lr
UpperCAmelCase : Union[str, Any] = log
UpperCAmelCase : Any = make_grid
UpperCAmelCase : Tuple = return_val
UpperCAmelCase : Dict = quantize
UpperCAmelCase : int = self.vqgan.decoder.z_shape
def _lowercase( self , A=None , A=None , A=5 , A=True ) -> List[str]:
UpperCAmelCase : Any = []
if output_path is None:
UpperCAmelCase : Union[str, Any] = """./animation.gif"""
if input_path is None:
UpperCAmelCase : Optional[int] = self.save_path
UpperCAmelCase : Dict = sorted(glob(input_path + """/*""" ) )
if not len(A ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(A ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
UpperCAmelCase : Union[str, Any] = total_duration / len(A )
UpperCAmelCase : str = [frame_duration] * len(A )
if extend_frames:
UpperCAmelCase : List[str] = 1.5
UpperCAmelCase : str = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(A ) )
imageio.mimsave(A , A , duration=A )
print(f'''gif saved to {output_path}''' )
def _lowercase( self , A=None , A=None ) -> Optional[int]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
UpperCAmelCase : int = preprocess(Image.open(A ) , target_image_size=256 ).to(self.device )
UpperCAmelCase : int = preprocess_vqgan(A )
UpperCAmelCase , *UpperCAmelCase : int = self.vqgan.encode(A )
return z
def _lowercase( self , A ) -> int:
UpperCAmelCase : int = self.latent.detach().requires_grad_()
UpperCAmelCase : int = base_latent + transform_vector
if self.quantize:
UpperCAmelCase , *UpperCAmelCase : Dict = self.vqgan.quantize(A )
else:
UpperCAmelCase : Union[str, Any] = trans_latent
return self.vqgan.decode(A )
def _lowercase( self , A , A , A=None ) -> Optional[int]:
UpperCAmelCase : List[str] = self.clip_preprocessor(text=A , images=A , return_tensors="""pt""" , padding=A )
UpperCAmelCase : List[Any] = self.clip(**A )
UpperCAmelCase : Dict = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase : int = similarity_logits * weights
return similarity_logits.sum()
def _lowercase( self , A , A , A ) -> Dict:
UpperCAmelCase : int = self._get_clip_similarity(pos_prompts["""prompts"""] , A , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
UpperCAmelCase : Any = self._get_clip_similarity(neg_prompts["""prompts"""] , A , weights=neg_prompts["""weights"""] )
else:
UpperCAmelCase : Optional[int] = torch.tensor([1] , device=self.device )
UpperCAmelCase : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def _lowercase( self , A , A , A ) -> str:
UpperCAmelCase : int = torch.randn_like(self.latent , requires_grad=A , device=self.device )
UpperCAmelCase : List[str] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase : Any = self._add_vector(A )
UpperCAmelCase : str = loop_post_process(A )
UpperCAmelCase : List[Any] = self._get_CLIP_loss(A , A , A )
print("""CLIP loss""" , A )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _lowercase( self , A , A , A ) -> Dict:
wandb.init(reinit=A , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
UpperCAmelCase : Any = Image.open(A )
UpperCAmelCase : int = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(A ) )
def _lowercase( self , A ) -> Dict:
if not prompts:
return []
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = []
if isinstance(A , A ):
UpperCAmelCase : Tuple = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(A , (tuple, list) ):
UpperCAmelCase : str = prompt[0]
UpperCAmelCase : List[Any] = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = prompt.split(""":""" )
UpperCAmelCase : str = float(A )
else:
UpperCAmelCase : Tuple = prompt
UpperCAmelCase : Dict = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A , device=self.device ),
}
def _lowercase( self , A , A=None , A=None , A=True , A=False , A=True , A=True , A=None , ) -> Tuple:
if image_path:
UpperCAmelCase : List[Any] = self._get_latent(A )
else:
UpperCAmelCase : List[str] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A , A , A )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase : List[Any] = self.process_prompts(A )
UpperCAmelCase : List[str] = self.process_prompts(A )
if save_final and save_path is None:
UpperCAmelCase : Tuple = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
UpperCAmelCase : List[str] = save_path + """_""" + get_timestamp()
os.makedirs(A )
UpperCAmelCase : Optional[Any] = save_path
UpperCAmelCase : Tuple = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(A ) )
UpperCAmelCase : str = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A , A , A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 672 |
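Stripped of logging, prompt parsing, and I/O, the _optimize_CLIP loop above is a small gradient-based edit of a frozen VQGAN latent. A skeleton of the idea; decode and clip_score are hypothetical callables standing in for the VQGAN decoder and the CLIP similarity.

import torch

def edit_latent(latent, decode, clip_score, steps: int = 10, lr: float = 0.01):
    # learn an additive vector; the base latent itself stays frozen
    vector = torch.randn_like(latent, requires_grad=True)
    optim = torch.optim.Adam([vector], lr=lr)
    for _ in range(steps):
        optim.zero_grad()
        image = decode(latent.detach() + vector)
        loss = -clip_score(image)  # higher CLIP similarity means lower loss
        loss.backward()
        optim.step()
    return latent + vector.detach()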
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 1 |
'''simple docstring'''
from math import pow
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
UpperCAmelCase : Union[str, Any] = int(pow(_lowercase , _lowercase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
UpperCAmelCase , UpperCAmelCase : Dict = backtrack(
_lowercase , _lowercase , current_number + 1 , _lowercase , _lowercase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
UpperCAmelCase , UpperCAmelCase : List[Any] = backtrack(
_lowercase , _lowercase , current_number + 1 , _lowercase , _lowercase )
return current_sum, solutions_count
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(_lowercase , _lowercase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
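The backtracking above counts the ways to write needed_sum as a sum of distinct natural numbers each raised to power, deciding take-or-skip for every base in turn. A compact restatement with two spot checks; the names are mine.

def count_ways(needed_sum: int, power: int, start: int = 1, current: int = 0) -> int:
    term = start ** power
    if current + term > needed_sum:  # later bases only get bigger
        return 0
    taken = 1 if current + term == needed_sum else count_ways(
        needed_sum, power, start + 1, current + term
    )
    skipped = count_ways(needed_sum, power, start + 1, current)
    return taken + skipped

print(count_ways(10, 2))  # 1  (10 = 1**2 + 3**2)
print(count_ways(4, 1))   # 2  (4 = 4 and 4 = 1 + 3)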
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
| 672 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
with open(A , encoding="""utf-8""" ) as input_file:
UpperCAmelCase : int = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
UpperCAmelCase : Optional[Any] = input_file.read()
UpperCAmelCase : Tuple = regexp.search(A )
return match
def _lowercase( self , A ) -> Dict:
with open(A , encoding="""utf-8""" ) as input_file:
UpperCAmelCase : List[Any] = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
UpperCAmelCase : Union[str, Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase : Optional[int] = regexp.finditer(A )
UpperCAmelCase : str = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = Path("""./datasets""" )
UpperCAmelCase : Tuple = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = Path("""./datasets""" )
UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(A ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 672 |
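The first lint above flags open(...) calls that neither pass an encoding nor open in binary mode. A quick demonstration of the same pattern on two one-line inputs:

import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
print(bool(pattern.search(' open("data.txt")')))                    # True: flagged
print(bool(pattern.search(' open("data.txt", encoding="utf-8")')))  # False: passes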
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ) -> int:
operators = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
operand_stack : Stack[int] = Stack()
operator_stack : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(i ) )
elif i in operators:
# RULE 2
operator_stack.push(i )
elif i == ")":
# RULE 4
opr = operator_stack.peek()
operator_stack.pop()
num2 = operand_stack.peek()
operand_stack.pop()
num1 = operand_stack.peek()
operand_stack.pop()
total = operators[opr](num1 , num2 )
operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 672 | 1 |
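A hand trace of the example expression through the two stacks makes Rule 4 concrete:

# "(5 + ((4 * 2) * (2 + 3)))"
#   read 5, +, 4, *, 2       -> operands [5, 4, 2],    operators [+, *]
#   ")" pops *: 4 * 2 = 8    -> operands [5, 8],       operators [+]
#   read *, 2, +, 3          -> operands [5, 8, 2, 3], operators [+, *, +]
#   ")" pops +: 2 + 3 = 5    -> operands [5, 8, 5],    operators [+, *]
#   ")" pops *: 8 * 5 = 40   -> operands [5, 40],      operators [+]
#   ")" pops +: 5 + 40 = 45  -> operands [45]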
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCamelCase ( _lowercase ) -> list[list[float]]:
UpperCAmelCase : Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_lowercase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCAmelCase : Dict = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
UpperCAmelCase : int = [[0.0, 0.0], [0.0, 0.0]]
UpperCAmelCase , UpperCAmelCase : Any = matrix[1][1], matrix[0][0]
UpperCAmelCase , UpperCAmelCase : Any = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_lowercase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowercase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCAmelCase : Tuple = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
UpperCAmelCase : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
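        # each cofactor below is a 2x2 minor determinant with the checkerboard sign pattern (+ - + / - + - / + - +)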
UpperCAmelCase : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCAmelCase : int = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCAmelCase : Optional[int] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCAmelCase : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCAmelCase : Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCAmelCase : List[str] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCAmelCase : List[Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCAmelCase : List[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCAmelCase : Union[str, Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCAmelCase : int = array(_lowercase )
for i in range(3 ):
for j in range(3 ):
UpperCAmelCase : Union[str, Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCAmelCase : Dict = array(_lowercase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_lowercase )
# Calculate the inverse of the matrix
return [[float(d(_lowercase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
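# A minimal usage sketch (illustrative, not part of the original module):
# >>> __lowerCamelCase([[4.0, 7.0], [2.0, 6.0]])
# [[0.6, -0.7], [-0.2, 0.4]]
# det = 4*6 - 7*2 = 10, so the inverse is (1/10) * [[6, -7], [-2, 4]].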
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
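# the original checkpoint stores the timm backbone weights under "backbone.0.body"; remap them onto the HF conv encoder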
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
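        # d_model is 256, so the stacked in_proj weight has shape (768, 256): rows 0-255 are q, 256-511 are k, 512-767 are v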
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a : Union[str, Any] = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
a : List[str] = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
a : Tuple = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        - 'matthews_correlation': Matthews correlation coefficient
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
return float((preds == labels).mean() )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase="binary" ) -> List[str]:
UpperCAmelCase : Optional[int] = simple_accuracy(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = float(fa_score(y_true=_lowercase , y_pred=_lowercase , average=_lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Any = {}
for id_pred, label in zip(_lowercase , _lowercase ):
UpperCAmelCase : List[str] = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
UpperCAmelCase : int = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCAmelCase : Any = [(pred, label)]
UpperCAmelCase , UpperCAmelCase : Optional[Any] = [], []
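    # per question: macro-F1 over its answer labels (feeds f1_m) and an exact-match flag set only when every answer is correct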
for question, preds_labels in question_map.items():
UpperCAmelCase , UpperCAmelCase : Optional[Any] = zip(*_lowercase )
UpperCAmelCase : int = fa_score(y_true=_lowercase , y_pred=_lowercase , average="""macro""" )
fas.append(_lowercase )
UpperCAmelCase : Any = int(sum(pred == label for pred, label in preds_labels ) == len(_lowercase ) )
ems.append(_lowercase )
UpperCAmelCase : List[str] = float(sum(_lowercase ) / len(_lowercase ) )
UpperCAmelCase : List[Any] = sum(_lowercase ) / len(_lowercase )
UpperCAmelCase : Tuple = float(fa_score(y_true=_lowercase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Any:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _lowercase( self ) -> Dict:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _lowercase( self , A , A ) -> Tuple:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(A , A )}
elif self.config_name == "cb":
return acc_and_fa(A , A , fa_avg="""macro""" )
elif self.config_name == "record":
UpperCAmelCase : List[str] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
UpperCAmelCase : List[Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(A , A )[0]
elif self.config_name == "multirc":
return evaluate_multirc(A , A )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(A , A )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCamelCase ( ) -> str:
raise RuntimeError("""CUDA out of memory.""" )
class UpperCamelCase_ ( nn.Module ):
def __init__( self ) -> Tuple:
super().__init__()
UpperCAmelCase : Tuple = nn.Linear(3 , 4 )
UpperCAmelCase : int = nn.BatchNormad(4 )
UpperCAmelCase : int = nn.Linear(4 , 5 )
def _lowercase( self , A ) -> int:
return self.lineara(self.batchnorm(self.lineara(A ) ) )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[str] = []
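        # find_executable_batch_size halves the batch size after every simulated CUDA OOM until the wrapped function succeeds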
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(A , [128, 64, 32, 16, 8] )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase , UpperCAmelCase : List[Any] = mock_training_loop_function("""hello""" )
self.assertListEqual(A , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _lowercase( self ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(A ):
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A , A ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(A ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _lowercase( self ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated()
UpperCAmelCase : List[Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , A )
UpperCAmelCase : Optional[Any] = release_memory(A )
self.assertEqual(torch.cuda.memory_allocated() , A )
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
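        # single sequence: <s> A </s> ; pair of sequences: <s> A </s></s> B </s>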
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
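# map each submodule to its public names; _LazyModule below defers the heavy imports until an attribute is first accessed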
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
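    """
    Integrate an ODE with Heun's method (the explicit trapezoidal rule, also
    known as the improved Euler method): a forward-Euler predictor followed by
    averaging the slopes at both ends of the step.

    Positional arguments, as inferred from the body: the ODE right-hand side
    f(x, y), the initial value y(x0), the initial point x0, the end point
    x_end and the step size.
    """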
UpperCAmelCase : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : str = np.zeros((n + 1,) )
UpperCAmelCase : Optional[Any] = ya
UpperCAmelCase : Union[str, Any] = xa
for k in range(_lowercase ):
UpperCAmelCase : Dict = y[k] + step_size * ode_func(_lowercase , y[k] )
UpperCAmelCase : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(_lowercase , y[k] ) + ode_func(x + step_size , _lowercase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a : Tuple = logging.get_logger(__name__)
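# OCR word boxes are normalized to a 0-1000 coordinate scale, the format LayoutLM-style models expect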
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
UpperCAmelCase : List[Any] = to_pil_image(_lowercase )
UpperCAmelCase , UpperCAmelCase : Dict = pil_image.size
UpperCAmelCase : Any = pytesseract.image_to_data(_lowercase , lang=_lowercase , output_type="""dict""" , config=_lowercase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
UpperCAmelCase : List[str] = [idx for idx, word in enumerate(_lowercase ) if not word.strip()]
UpperCAmelCase : Any = [word for idx, word in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCAmelCase : Optional[int] = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCAmelCase : List[str] = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCAmelCase : Any = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCAmelCase : int = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCAmelCase : Optional[Any] = []
for x, y, w, h in zip(_lowercase , _lowercase , _lowercase , _lowercase ):
UpperCAmelCase : Any = [x, y, x + w, y + h]
actual_boxes.append(_lowercase )
# finally, normalize the bounding boxes
UpperCAmelCase : Optional[Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowercase , _lowercase , _lowercase ) )
assert len(_lowercase ) == len(_lowercase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCamelCase_ ( __magic_name__ ):
lowercase = ['pixel_values']
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = 1 / 255 , A = True , A = None , A = None , A = True , A = None , A = "" , **A , ) -> None:
super().__init__(**A )
UpperCAmelCase : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase : Optional[int] = get_size_dict(A )
UpperCAmelCase : Optional[int] = do_resize
UpperCAmelCase : Union[str, Any] = size
UpperCAmelCase : Union[str, Any] = resample
UpperCAmelCase : List[Any] = do_rescale
UpperCAmelCase : Tuple = rescale_value
UpperCAmelCase : Any = do_normalize
UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCAmelCase : List[Any] = apply_ocr
UpperCAmelCase : Dict = ocr_lang
UpperCAmelCase : List[Any] = tesseract_config
def _lowercase( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> np.ndarray:
UpperCAmelCase : Optional[int] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCAmelCase : List[str] = (size["""height"""], size["""width"""])
return resize(A , size=A , resample=A , data_format=A , **A )
def _lowercase( self , A , A , A = None , **A , ) -> np.ndarray:
return rescale(A , scale=A , data_format=A , **A )
def _lowercase( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def _lowercase( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Any = size if size is not None else self.size
UpperCAmelCase : str = get_size_dict(A )
UpperCAmelCase : Dict = resample if resample is not None else self.resample
UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase : List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase : Optional[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
UpperCAmelCase : Optional[Any] = [to_numpy_array(A ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
UpperCAmelCase : List[str] = []
UpperCAmelCase : Dict = []
for image in images:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = apply_tesseract(A , A , A )
words_batch.append(A )
boxes_batch.append(A )
if do_resize:
UpperCAmelCase : Any = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_rescale:
UpperCAmelCase : str = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
UpperCAmelCase : Union[str, Any] = [self.normalize(image=A , mean=A , std=A ) for image in images]
UpperCAmelCase : int = [to_channel_dimension_format(A , A ) for image in images]
UpperCAmelCase : Any = BatchFeature(data={"""pixel_values""": images} , tensor_type=A )
if apply_ocr:
UpperCAmelCase : Optional[int] = words_batch
UpperCAmelCase : str = boxes_batch
return data
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
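        # build one (premise, hypothesis) NLI pair per candidate label, e.g. ("text", "This example is sports.")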
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
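# A minimal usage sketch (illustrative, assuming the standard transformers pipeline API):
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification")
# classifier("Who are you voting for in 2020?",
#            candidate_labels=["politics", "economics"],
#            hypothesis_template="This example is {}.")
# -> {"sequence": ..., "labels": [...], "scores": [...]}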
| 672 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
    predictions (`list` of `float`): Predicted values, as returned by a model.
    references (`list` of `float`): Ground truth values.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
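# Hedged note (not part of the original test module): google/pegasus-large keeps
# ids 0 and 1 for <pad> and </s>, ids 2-104 for <mask_1>, <mask_2> and the
# <unk_*> placeholder tokens, and shifts every underlying SentencePiece id by
# offset == 103; hence unk_token_id == tokenizer.offset + 2 == 105 in the
# assertions above (the raw SentencePiece <unk> sits at piece id 2). A sketch of
# mapping a tokenizer id back to its SentencePiece piece:
#
#     piece = tokenizer.sp_model.IdToPiece(hf_id - tokenizer.offset)  # for hf_id >= 105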
| 672 | 1 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
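# Hedged usage sketch (checkpoint name is an assumption, mirroring the
# integration test above): extracting multi-scale feature maps with the
# backbone class exercised by create_and_check_backbone.
#
#     processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#     backbone = ConvNextVaBackbone.from_pretrained(
#         "facebook/convnextv2-tiny-1k-224", out_features=["stage2", "stage3", "stage4"]
#     )
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     feature_maps = backbone(**inputs).feature_maps  # one (B, C_i, H_i, W_i) tensor per stage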
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : int = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Union[str, Any] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : str = """this is a test"""
UpperCAmelCase : Any = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : int = """<pad>"""
UpperCAmelCase : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[int] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : List[Any] = tokenizer.tokenize(A )
UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[int] = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Tuple = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase : Dict = tokenizer.encode(A )
UpperCAmelCase : List[Any] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : int = AlbertTokenizer(A )
UpperCAmelCase : Dict = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : int = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase : Union[str, Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
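# Hedged note on the keep_accents check above: with keep_accents=False (the
# Albert default) the tokenizer NFKD-normalizes text and strips combining marks,
# so the accent in "falsé" would be dropped before SentencePiece runs; with
# keep_accents=True the standalone "é" survives as its own piece and, since the
# fixture vocabulary lacks it, maps to id 1 and round-trips as "<unk>" exactly
# as the assertions show.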
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `float`): Predicted values, as returned by a model.
references (`list` of `float`): Ground truth values.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672 | 1 |
'''simple docstring'''
import numpy as np
def __lowerCamelCase ( _lowercase ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def __lowerCamelCase ( _lowercase ) -> np.array:
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
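# Hedged worked example (the two functions above implement the logistic sigmoid
# and x * sigmoid(1.702 * x), a well-known close approximation of GELU; exact
# GELU is x * Phi(x) with Phi the standard normal CDF):
#
#     sigmoid(np.array([-1.0, 0.0, 1.0]))  # -> approx [0.2689, 0.5, 0.7311]
#     # x * sigmoid(1.702 * x) at x = 1.0 gives approx 0.8458,
#     # versus exact GELU(1.0) approx 0.8413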
| 672 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase : List[Any] = [
float(_lowercase )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
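# Hedged worked example of the Quine-McCluskey reduction implemented above,
# for 3 variables and minterms {0, 1, 3, 7}:
#     binaries          : 000, 001, 011, 111
#     first merge pass  : 000+001 -> 00_, 001+011 -> 0_1, 011+111 -> _11
#     prime implicants  : 00_, 0_1, _11  (nothing merges further)
#     chart selection   : 000 is covered only by 00_ and 111 only by _11, so both
#                         are essential; together they also cover 001 and 011,
#                         making {00_, _11} the minimal cover.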
| 672 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 672 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
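# Hedged worked example of the digit-squares chain the code above memoizes:
#     44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1   (chain ends at 1)
#     85 -> 8**2 + 5**2 = 89                    (chain ends at 89)
# DIGITS_SQUARED precomputes the digit-square sum for every block of five
# decimal digits, so next_number can consume numbers five digits at a time.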
| 672 | 1 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
import qiskit
def __lowerCamelCase ( _lowercase , _lowercase ) -> qiskit.result.counts.Counts:
UpperCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase : List[str] = qiskit.QuantumCircuit(_lowercase , _lowercase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
UpperCAmelCase : Any = qiskit.execute(_lowercase , _lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
a : Optional[int] = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
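# Hedged note: with X applied to both qubits the circuit above deterministically
# prepares |11>, so the expected histogram is {'11': 1000}; the aer_simulator is
# noise-free by default, so no other bitstrings should appear.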
| 672 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
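# Hedged numeric illustration of the acceptance rule above: a move that worsens
# the score by 2 while current_temp == 100 is still taken with probability
# e**(-2 / 100) ~= 0.98, but at current_temp == 1 the same move only survives
# with probability e**(-2) ~= 0.14; the cooling schedule is what gradually turns
# random exploration into a greedy search.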
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 672 | 1 |
'''simple docstring'''
a : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a : Optional[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a : List[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
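# Hedged usage sketch (model name is an assumption; any NLI checkpoint whose
# config.label2id contains an "entailment"-prefixed label works, per the
# entailment_id property above):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#         hypothesis_template="This example is {}.",
#     )
#     # -> {"sequence": ..., "labels": [... best label first ...], "scores": [...]}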
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
image=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 1 |
'''simple docstring'''
import string
def __lowerCamelCase ( _lowercase ) -> str:
UpperCAmelCase : Any = """"""
for i in sequence:
UpperCAmelCase : str = ord(_lowercase )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def __lowerCamelCase ( _lowercase ) -> str:
UpperCAmelCase : Union[str, Any] = string.ascii_letters
UpperCAmelCase : Union[str, Any] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_lowercase )] if c in letters else c for c in sequence )
def __lowerCamelCase ( ) -> None:
from timeit import timeit
print("""Running performance benchmarks...""" )
UpperCAmelCase : Any = """from string import printable ; from __main__ import atbash, atbash_slow"""
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_lowercase )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=_lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 672 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : Any = get_logger()
a : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , A=None , A=None , **A ) -> str:
super().__init__(features=A )
import jax
from jaxlib.xla_client import Device
if isinstance(A , A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
UpperCAmelCase : Optional[int] = device if isinstance(A , A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase : List[Any] = str(jax.devices()[0] )
UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(A )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase : List[str] = {}
if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                UpperCAmelCase : str = {"""dtype""": jnp.int64}
            else:
                UpperCAmelCase : int = {"""dtype""": jnp.int32}
        elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            UpperCAmelCase : Any = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
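# Minimal sketch (assuming `jax` is installed) of the dtype rule the formatter
# above applies when tensorizing numpy data: integer arrays follow the x64
# flag, floating arrays default to float32, everything else keeps its dtype.
import jax
import jax.numpy as jnp
import numpy as np

def to_jax_demo(value: np.ndarray) -> "jax.Array":
    if np.issubdtype(value.dtype, np.integer):
        dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    elif np.issubdtype(value.dtype, np.floating):
        dtype = jnp.float32
    else:
        dtype = None  # let jnp.array infer
    return jnp.array(value, dtype=dtype)

print(to_jax_demo(np.arange(4)).dtype)  # int32 unless JAX_ENABLE_X64 is set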
| 672 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase = 'ssube/stable-diffusion-x4-upscaler-onnx'
def _lowercase( self , A=0 ) -> Dict:
UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(A ) )
UpperCAmelCase : Optional[Any] = torch.manual_seed(A )
UpperCAmelCase : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> str:
UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = self.get_dummy_inputs()
UpperCAmelCase : List[str] = pipe(**A ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Dict = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Any = self.get_dummy_inputs()
UpperCAmelCase : List[Any] = pipe(**A ).images
UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Any = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[Any] = self.get_dummy_inputs()
UpperCAmelCase : int = pipe(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Any = self.get_dummy_inputs()
UpperCAmelCase : Tuple = pipe(**A ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Optional[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCAmelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : int = pipe(**A ).images
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : int = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
@property
def _lowercase( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase( self ) -> str:
UpperCAmelCase : Union[str, Any] = ort.SessionOptions()
UpperCAmelCase : Any = False
return options
def _lowercase( self ) -> str:
UpperCAmelCase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCAmelCase : Optional[int] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = """A fantasy landscape, trending on artstation"""
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(
prompt=A , image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
UpperCAmelCase : int = output.images
UpperCAmelCase : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowercase( self ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCAmelCase : Optional[Any] = init_image.resize((128, 128) )
UpperCAmelCase : Any = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
UpperCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[Any] = """A fantasy landscape, trending on artstation"""
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
prompt=A , image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
UpperCAmelCase : Optional[int] = output.images
UpperCAmelCase : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCAmelCase : int = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 672 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : str = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase : Dict = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase : int = repo.get_issues(state="""open""" )
for issue in open_issues:
        UpperCAmelCase : Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase : Any = comments[0] if len(_lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
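# Self-contained sketch of the inactivity thresholds applied above (the helper
# name is hypothetical, not part of the script): issues are only touched once
# they are at least 30 days old; the sketch below covers just the 23-day
# "stale comment" rule, while the script also closes after 7 days of silence
# following a bot comment.
from datetime import datetime, timedelta

def staleness_demo(updated_at, created_at, now):
    old_enough = (now - created_at).days >= 30
    if old_enough and (now - updated_at).days > 23:
        return "comment"
    return "keep"

now = datetime(2023, 6, 1)
assert staleness_demo(now - timedelta(days=30), now - timedelta(days=60), now) == "comment"
assert staleness_demo(now - timedelta(days=2), now - timedelta(days=60), now) == "keep"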
| 672 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __lowerCamelCase ( _lowercase ) -> Any:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
# word like '180' or '身高' or '神'
for char in word:
UpperCAmelCase : int = ord(_lowercase )
if not _is_chinese_char(_lowercase ):
return 0
return 1
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase : Dict = set()
for token in tokens:
UpperCAmelCase : Any = len(_lowercase ) > 1 and is_chinese(_lowercase )
if chinese_word:
word_set.add(_lowercase )
UpperCAmelCase : List[Any] = list(_lowercase )
return word_list
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
if not chinese_word_set:
return bert_tokens
UpperCAmelCase : List[str] = max([len(_lowercase ) for w in chinese_word_set] )
UpperCAmelCase : List[Any] = bert_tokens
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = 0, len(_lowercase )
while start < end:
UpperCAmelCase : Any = True
if is_chinese(bert_word[start] ):
UpperCAmelCase : str = min(end - start , _lowercase )
for i in range(_lowercase , 1 , -1 ):
UpperCAmelCase : Any = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCAmelCase : Optional[int] = """##""" + bert_word[j]
UpperCAmelCase : Optional[Any] = start + i
UpperCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Tuple = []
for i in range(0 , len(_lowercase ) , 1_0_0 ):
UpperCAmelCase : int = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
UpperCAmelCase : Optional[int] = [get_chinese_word(_lowercase ) for r in res]
ltp_res.extend(_lowercase )
assert len(_lowercase ) == len(_lowercase )
UpperCAmelCase : str = []
for i in range(0 , len(_lowercase ) , 1_0_0 ):
UpperCAmelCase : Tuple = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_lowercase , truncation=_lowercase , max_length=5_1_2 )
bert_res.extend(res["""input_ids"""] )
assert len(_lowercase ) == len(_lowercase )
UpperCAmelCase : List[Any] = []
for input_ids, chinese_word in zip(_lowercase , _lowercase ):
UpperCAmelCase : Any = []
for id in input_ids:
UpperCAmelCase : int = bert_tokenizer._convert_id_to_token(_lowercase )
input_tokens.append(_lowercase )
UpperCAmelCase : List[str] = add_sub_symbol(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowercase ):
if token[:2] == "##":
UpperCAmelCase : int = token[2:]
# save chinese tokens' pos
if len(_lowercase ) == 1 and _is_chinese_char(ord(_lowercase ) ):
ref_id.append(_lowercase )
ref_ids.append(_lowercase )
assert len(_lowercase ) == len(_lowercase )
return ref_ids
def __lowerCamelCase ( _lowercase ) -> Any:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : List[str] = f.readlines()
UpperCAmelCase : int = [line.strip() for line in data if len(_lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCAmelCase : Tuple = LTP(args.ltp ) # faster in GPU device
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.bert )
UpperCAmelCase : int = prepare_ref(_lowercase , _lowercase , _lowercase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : Optional[int] = [json.dumps(_lowercase ) + """\n""" for ref in ref_ids]
f.writelines(_lowercase )
if __name__ == "__main__":
a : str = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
a : Any = parser.parse_args()
main(args)
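# Toy, de-obfuscated illustration of the "##" sub-symbol marking performed
# above (the upstream script calls this step add_sub_symbol; the dump renames
# it): characters inside a matched whole Chinese word, except the first, get
# the WordPiece continuation prefix.
def mark_subwords_demo(tokens, words):
    out = list(tokens)
    max_len = max(len(w) for w in words)
    i = 0
    while i < len(out):
        for j in range(min(len(out), i + max_len), i + 1, -1):
            if "".join(out[i:j]) in words:
                for k in range(i + 1, j):
                    out[k] = "##" + out[k]
                i = j - 1
                break
        i += 1
    return out

assert mark_subwords_demo(["身", "高", "很", "高"], {"身高"}) == ["身", "##高", "很", "高"]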
| 672 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
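# Hedged sketch of what the {"type": "linear", "factor": 10.0} rope_scaling
# setting tested above does (assumed mechanics, mirroring Llama's linear RoPE
# scaling): position indices are divided by the factor before the rotary
# frequencies are computed, stretching the effective context window.
import torch

factor = 10.0
positions = torch.arange(16, dtype=torch.float32)
scaled = positions / factor  # linear scaling compresses positions by `factor`
assert torch.allclose(scaled * factor, positions)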
| 672 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 |
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowercase )
return value
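# Self-contained sanity sketch of the trial-division primality test above
# (de-obfuscated, since the dump renames both helpers to __lowerCamelCase):
import math

def is_prime_demo(n: int) -> bool:
    if 1 < n < 4:  # 2 and 3 are prime
        return True
    if n < 2 or n % 2 == 0:  # negatives, 0, 1 and even numbers are not
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))

assert is_prime_demo(13) and is_prime_demo(2)
assert not is_prime_demo(1) and not is_prime_demo(9)
# next_prime(14) would climb 14 -> 15 -> 16 -> 17; with desc=True it finds 13.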
| 672 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase ) -> bool:
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __lowerCamelCase ( _lowercase ) -> bool:
UpperCAmelCase : List[str] = credit_card_number
UpperCAmelCase : Tuple = 0
UpperCAmelCase : List[Any] = len(_lowercase ) - 2
for i in range(_lowercase , -1 , -2 ):
# double the value of every second digit
UpperCAmelCase : Union[str, Any] = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
UpperCAmelCase : Optional[Any] = cc_number[:i] + str(_lowercase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_lowercase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def __lowerCamelCase ( _lowercase ) -> bool:
UpperCAmelCase : Dict = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 1_3 <= len(_lowercase ) <= 1_6:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(_lowercase ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(_lowercase ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 672 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
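# Self-contained equivalent of the set-based pangram check above:
import string

def is_pangram_demo(sentence: str) -> bool:
    return set(string.ascii_lowercase) <= {c for c in sentence.lower() if c.isalpha()}

assert is_pangram_demo("The quick brown fox jumps over the lazy dog")
assert not is_pangram_demo("Hello world")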
| 672 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
a : str = random.Random()
def __lowerCamelCase ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ) -> Optional[Any]:
if rng is None:
UpperCAmelCase : List[str] = global_rng
UpperCAmelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self , A , A=7 , A=400 , A=2000 , A=1 , A=0.0 , A=16000 , A=True , A=True , ) -> Optional[int]:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : str = min_seq_length
UpperCAmelCase : Optional[int] = max_seq_length
UpperCAmelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase : List[Any] = feature_size
UpperCAmelCase : List[str] = padding_value
UpperCAmelCase : Any = sampling_rate
UpperCAmelCase : List[Any] = return_attention_mask
UpperCAmelCase : Union[str, Any] = do_normalize
def _lowercase( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase( self , A=False , A=False ) -> Optional[Any]:
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
UpperCAmelCase : List[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase : Optional[int] = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = WavaVecaFeatureExtractor
def _lowercase( self ) -> List[str]:
UpperCAmelCase : str = WavaVecaFeatureExtractionTester(self )
def _lowercase( self , A ) -> int:
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
def _lowercase( self ) -> Optional[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase : Any = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCAmelCase : str = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
UpperCAmelCase : Optional[int] = feat_extract(A , return_tensors="""np""" ).input_values
UpperCAmelCase : List[Any] = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase : Optional[int] = np.asarray(A )
UpperCAmelCase : int = feat_extract(A , return_tensors="""np""" ).input_values
UpperCAmelCase : List[str] = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase : Any = ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase : List[Any] = [None, 1600, None]
for max_length, padding in zip(A , A ):
UpperCAmelCase : Dict = feat_extract(A , padding=A , max_length=A , return_tensors="""np""" )
UpperCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowercase( self ) -> str:
UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : str = range(800 , 1400 , 200 )
UpperCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase : Dict = [None, 1600, None]
for max_length, padding in zip(A , A ):
UpperCAmelCase : List[Any] = feat_extract(A , max_length=A , padding=A )
UpperCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase : Dict = feat_extract(
A , truncation=A , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
UpperCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase : Union[str, Any] = feat_extract(
A , truncation=A , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase : List[str] = feat_extract(
A , truncation=A , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def _lowercase( self ) -> Tuple:
import torch
UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase : Tuple = np.random.rand(100 ).astype(np.float32 )
        UpperCAmelCase : str = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase : Union[str, Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            UpperCAmelCase : Tuple = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def _lowercase( self ) -> Dict:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCAmelCase : Optional[Any] = WavaVecaConfig.from_pretrained(A )
UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
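# Minimal sketch of the zero-mean / unit-variance normalization that
# _check_zero_mean_unit_variance above asserts (assumed formula, matching the
# usual Wav2Vec2 normalization but without attention-mask handling):
import numpy as np

def normalize_demo(x: np.ndarray) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

x = np.random.rand(800).astype(np.float32)
y = normalize_demo(x)
assert abs(y.mean()) < 1e-3 and abs(y.var() - 1) < 1e-2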
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 672 | 1 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
a : Tuple = logging.getLogger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'masked_bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 672 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
    def __init__( self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
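# Like any PretrainedConfig subclass, this config round-trips through the standard
# save/load API. A brief usage sketch (assuming the class is exported as FNetConfig;
# that name is illustrative, not taken from this file):
#
#   config = FNetConfig(vocab_size=32000, hidden_size=768)
#   config.save_pretrained("./fnet-config")             # writes config.json
#   reloaded = FNetConfig.from_pretrained("./fnet-config")
#   assert reloaded.hidden_size == 768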
| 672 | 1 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
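# Trace for the sample input "(5 + ((4 * 2) * (2 + 3)))":
#   push 5; push +; push 4; push *; push 2; ")" -> 4 * 2 = 8
#   push *; push 2; push +; push 3; ")" -> 2 + 3 = 5
#   ")" -> 8 * 5 = 40; ")" -> 5 + 40 = 45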
| 672 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
a : Dict = None
a : str = {
"""7B""": 1_1_0_0_8,
"""13B""": 1_3_8_2_4,
"""30B""": 1_7_9_2_0,
"""65B""": 2_2_0_1_6,
"""70B""": 2_8_6_7_2,
}
a : Tuple = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
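# Sanity check (hypothetical call): for the 7B model, dim = 4096 and the defaults
# reproduce the table above:
#   compute_intermediate_size(4096) == 256 * ((int(8 * 4096 / 3) + 255) // 256) == 11008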
def read_json(path):
    with open(path , "r" ) as f:
        return json.load(f )


def write_json(text, path):
    with open(path , "w" ) as f:
        json.dump(text , f )
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , "tmp" )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , "params.json" ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase : Tuple = torch.load(os.path.join(_lowercase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCAmelCase : Optional[Any] = [
torch.load(os.path.join(_lowercase , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" )
for i in range(_lowercase )
]
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : Any = {"""weight_map""": {}}
for layer_i in range(_lowercase ):
UpperCAmelCase : Dict = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase : List[Any] = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase : List[str] = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
UpperCAmelCase : int = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(_lowercase , _lowercase , _lowercase )
for i in range(_lowercase )
] , dim=0 , ).reshape(_lowercase , _lowercase ) )
UpperCAmelCase : Optional[Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
_lowercase , _lowercase , _lowercase )
for i in range(_lowercase )
] , dim=0 , ).reshape(_lowercase , _lowercase ) , _lowercase , _lowercase , _lowercase , )
UpperCAmelCase : List[str] = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
_lowercase , _lowercase , _lowercase )
for i in range(_lowercase )
] , dim=0 , ).reshape(_lowercase , _lowercase )
UpperCAmelCase : Tuple = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(_lowercase )] , dim=1 )
UpperCAmelCase : Optional[int] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_lowercase )] , dim=0 )
UpperCAmelCase : Dict = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_lowercase )] , dim=1 )
UpperCAmelCase : Optional[int] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_lowercase )] , dim=0 )
UpperCAmelCase : Optional[int] = inv_freq
for k, v in state_dict.items():
UpperCAmelCase : List[str] = filename
param_count += v.numel()
torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) )
UpperCAmelCase : Any = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase : Any = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCAmelCase : Any = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_lowercase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_lowercase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase : Any = filename
param_count += v.numel()
torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) )
# Write configs
UpperCAmelCase : str = {"""total_size""": param_count * 2}
write_json(_lowercase , os.path.join(_lowercase , """pytorch_model.bin.index.json""" ) )
UpperCAmelCase : List[str] = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCAmelCase : Optional[int] = params["""multiple_of"""] if """multiple_of""" in params else 2_5_6
UpperCAmelCase : Any = LlamaConfig(
hidden_size=_lowercase , intermediate_size=compute_intermediate_size(_lowercase , _lowercase , _lowercase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_lowercase , )
config.save_pretrained(_lowercase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
UpperCAmelCase : Tuple = LlamaForCausalLM.from_pretrained(_lowercase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowercase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_lowercase , safe_serialization=_lowercase )
shutil.rmtree(_lowercase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
    parser.add_argument(
        "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
    parser.add_argument(
        "--output_dir" , help="Location to write HF model and tokenizer" , )
    parser.add_argument("--safe_serialization" , type=bool , help="Whether or not to save using `safetensors`." )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , "tokenizer.model" )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
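# The permute() helper inside write_model() exists because Meta's checkpoints store
# rotary Q/K projection rows with interleaved (even/odd) rotary dimensions, while the
# Hugging Face LLaMA code rotates the first and second halves of each head. A tiny
# sketch of the reindexing (illustrative shapes, not the real 4096-dim weights):
#
#   import torch
#   n_heads, head_dim, dim = 2, 4, 8
#   w = torch.arange(dim * dim).float().reshape(dim, dim)
#   w2 = w.view(n_heads, head_dim // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
#   # within each head, rows [0, 1, 2, 3] are reordered to [0, 2, 1, 3]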
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
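# For reference, read_in_q_k_v() above splits PyTorch's fused MultiHeadAttention
# projection into separate q/k/v matrices: in_proj_weight stacks the three 256-dim
# projections along dim 0. A minimal sketch of that split (illustrative only):
#
#   import torch
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
#   assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)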
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1, string2) -> str | Literal[False]:
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check(binary) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X" )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary(no_of_variable, minterms) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table(string1, string2, count) -> bool:
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart, prime_implicants) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants, binary) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        int(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
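# Worked example of the merge step at the heart of compare_string()/check(): two
# minterms whose binary codes differ in exactly one bit combine into one implicant,
# with "_" marking the eliminated variable.
#   compare_string("0010", "0110") -> "0_10"   (only bit 1 differs)
#   compare_string("0110", "1101") -> False    (more than one bit differs)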
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
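# A typical entry point for the pipeline re-exported above (a sketch; the checkpoint
# id "microsoft/vq-diffusion-ithq" refers to the reference VQ-Diffusion release, and
# generation arguments are left at their defaults):
#
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]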
| 672 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase_ ( unittest.TestCase ):
    def setUp( self ) -> None:
        self.block_size = 10
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = [1, 2, 3, 4]
UpperCAmelCase : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Any = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
UpperCAmelCase , UpperCAmelCase : Optional[Any] = process_story(A )
self.assertEqual(A , [] )
def _lowercase( self ) -> str:
UpperCAmelCase : List[str] = """"""
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = process_story(A )
self.assertEqual(A , [] )
self.assertEqual(A , [] )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
UpperCAmelCase , UpperCAmelCase : Any = process_story(A )
UpperCAmelCase : Dict = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = ["""It was the best of times."""]
self.assertEqual(A , A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = torch.tensor([1, 2, 3, 4] )
UpperCAmelCase : Tuple = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A , 0 ).numpy() , expected.numpy() )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCAmelCase : Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 23 ).numpy() , expected.numpy() )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Union[str, Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCAmelCase : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 1 ).numpy() , expected.numpy() )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = 101
UpperCAmelCase : Any = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCAmelCase : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCAmelCase : Dict = compute_token_type_ids(A , A )
np.testing.assert_array_equal(A , A )
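# A minimal implementation consistent with the truncate_or_pad tests above (a sketch
# for illustration; the real helper lives in utils_summarization):
#
#   def truncate_or_pad(sequence, block_size, pad_token_id):
#       if len(sequence) > block_size:
#           return sequence[:block_size]
#       return sequence + [pad_token_id] * (block_size - len(sequence))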
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
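# build_inputs_with_special_tokens() above follows the RoBERTa/CamemBERT layout:
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>
# e.g. build_inputs_with_special_tokens([5, 6], [7]) -> [0, 5, 6, 2, 2, 7, 2]
# (ids 0 and 2 for <s>/</s> are illustrative defaults, not guaranteed by this file)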
| 672 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=32 , A=2 , A=1 , A=0 , A=0.0_2 , ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[int] = eos_token_id
UpperCAmelCase : int = pad_token_id
UpperCAmelCase : Optional[int] = bos_token_id
UpperCAmelCase : int = initializer_range
def _lowercase( self ) -> Tuple:
UpperCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
UpperCAmelCase : List[Any] = shift_tokens_right(A , 1 , 2 )
UpperCAmelCase : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=A , )
UpperCAmelCase : int = prepare_blenderbot_inputs_dict(A , A , A )
return config, inputs_dict
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase( self , A , A , A ) -> List[str]:
UpperCAmelCase : str = 20
UpperCAmelCase : List[str] = model_class_name(A )
UpperCAmelCase : List[str] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase , UpperCAmelCase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase : Dict = model.init_cache(decoder_input_ids.shape[0] , A , A )
UpperCAmelCase : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
UpperCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCAmelCase : Tuple = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
UpperCAmelCase : Optional[Any] = model.decode(A , A )
UpperCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : Optional[int] = 20
UpperCAmelCase : Any = model_class_name(A )
UpperCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase , UpperCAmelCase : Tuple = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase : List[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , A , A )
UpperCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : str = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
UpperCAmelCase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
UpperCAmelCase : List[str] = model.decode(A , A , decoder_attention_mask=A )
UpperCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 99
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Dict = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
UpperCAmelCase : Tuple = input_ids.shape[0]
UpperCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = self._get_config_and_data()
UpperCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration(A )
UpperCAmelCase : Optional[Any] = lm_model(input_ids=A )
UpperCAmelCase : List[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , A )
def _lowercase( self ) -> str:
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_shift_tokens_right( self ) -> None:
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase , __magic_name__ ):
lowercase = True
lowercase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _lowercase( self ) -> str:
UpperCAmelCase : List[str] = FlaxBlenderbotModelTester(self )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : Any = self._prepare_for_class(A , A )
UpperCAmelCase : List[str] = model_class(A )
@jax.jit
            def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase : Optional[int] = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase : Union[str, Any] = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : Dict = model_class(A )
UpperCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
UpperCAmelCase : List[Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
            def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                return model.decode(
                    decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase : str = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase : Optional[Any] = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase : List[str] = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase : Tuple = model(A )
self.assertIsNotNone(A )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
UpperCAmelCase : Tuple = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
UpperCAmelCase : int = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=A )
UpperCAmelCase : Any = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
UpperCAmelCase : Union[str, Any] = ["""Sam"""]
UpperCAmelCase : Any = tokenizer(A , return_tensors="""jax""" )
UpperCAmelCase : Optional[Any] = model.generate(**A , **A )
UpperCAmelCase : Tuple = """Sam is a great name. It means \"sun\" in Gaelic."""
UpperCAmelCase : List[Any] = tokenizer.batch_decode(A , **A )
assert generated_txt[0].strip() == tgt_text
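# For reference, a NumPy sketch of the shift_tokens_right() behavior the test above
# exercises (assumed semantics: prepend the decoder start token, drop the last
# position, and map any -100 label padding back to pad_token_id):
#
#   import numpy as np
#   def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
#       shifted = np.zeros_like(input_ids)
#       shifted[:, 1:] = input_ids[:, :-1]
#       shifted[:, 0] = decoder_start_token_id
#       return np.where(shifted == -100, pad_token_id, shifted)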
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
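# A short usage sketch (hypothetical ODE, assuming the signature restored above):
# integrating y' = y from x = 0 to 1 with y(0) = 1 should approach e at the endpoint.
if __name__ == "__main__":
    import math

    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(ys[-1] - math.e) < 1e-3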
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> str:
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer does not support padding, which zero-shot classification requires; """
                """falling back to `pad_token=eos_token`.""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
                # The tokenizer may raise because the requested truncation
                # length is never reached by the input, in which case no
                # truncation is needed. Matching on the error message is the
                # only practical way to detect that exception here.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
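# A minimal usage sketch of the pipeline implemented above, assuming network
# access; the NLI checkpoint named here is a common public choice, not one the
# pipeline mandates. Each (sequence, label) pair becomes an NLI premise and
# hypothesis, and the entailment logit is read off as the label score.
def _zero_shot_sketch():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    return classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
        hypothesis_template="This example is {}.",
        multi_label=False,
    )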
| 672 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a : Union[str, Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a : List[str] = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
a : Optional[Any] = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
a : int = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RoFormerTokenizer
def __init__( self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ) -> Tuple:
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , A ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , A ) != strip_accents
):
UpperCAmelCase : Any = getattr(A , pre_tok_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[Any] = strip_accents
UpperCAmelCase : Optional[int] = pre_tok_class(**A )
UpperCAmelCase : str = do_lower_case
def __getstate__( self ) -> Any:
UpperCAmelCase : List[Any] = self.__dict__.copy()
UpperCAmelCase : Tuple = BertPreTokenizer()
return state
def __setstate__( self , A ) -> List[Any]:
UpperCAmelCase : int = d
UpperCAmelCase : List[str] = self.__dict__["""_tokenizer"""].get_vocab()
UpperCAmelCase : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(A ) )
def _lowercase( self , A , A=None ) -> Tuple:
UpperCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase( self , A , A = None ) -> Tuple[str]:
UpperCAmelCase : Dict = self._tokenizer.model.save(A , name=A )
return tuple(A )
def _lowercase( self , A , A=None , A=None , A=False , **A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = BertPreTokenizer()
return super().save_pretrained(A , A , A , A , **A )
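# A minimal usage sketch of the fast tokenizer above: the custom jieba
# pre-tokenizer segments Chinese text into words before WordPiece runs.
# Assumes the `rjieba` package and network access; the sentence is illustrative.
def _roformer_tokenize_sketch():
    from transformers import RoFormerTokenizerFast

    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    return tokenizer.tokenize("今天天气非常好。")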
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
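# A standalone sketch of the seq2seq encoding pattern the tests above exercise:
# sources are encoded normally and targets go through `text_target`, so labels
# are tokenized with the same vocabulary and special tokens. Assumes network
# access and sentencepiece; shapes depend on the padding settings chosen.
def _pegasus_seq2seq_sketch():
    from transformers import PegasusTokenizer

    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
    batch = tokenizer(
        ["A long source document."],
        text_target=["A short summary."],
        padding="longest",
        truncation=True,
        return_tensors="pt",
    )
    return batch.input_ids, batch.labels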
| 672 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> str:
UpperCAmelCase : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False ) -> List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase : Optional[Any] = """"""
else:
UpperCAmelCase : Optional[Any] = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : str = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Dict = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : int = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : Optional[Any] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : str = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = dct.pop(_lowercase )
UpperCAmelCase : Any = val
def __lowerCamelCase ( ) -> Tuple:
UpperCAmelCase : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase : Any = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase : Optional[Any] = 1_0_0_0
UpperCAmelCase : int = """huggingface/label-files"""
UpperCAmelCase : List[str] = """imagenet-1k-id2label.json"""
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Any = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
UpperCAmelCase : Dict = int(deit_name[-6:-4] )
UpperCAmelCase : str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
UpperCAmelCase : Tuple = 1_9_2
UpperCAmelCase : List[Any] = 7_6_8
UpperCAmelCase : Tuple = 1_2
UpperCAmelCase : Optional[int] = 3
elif deit_name[9:].startswith("""small""" ):
UpperCAmelCase : Dict = 3_8_4
UpperCAmelCase : Tuple = 1_5_3_6
UpperCAmelCase : List[Any] = 1_2
UpperCAmelCase : List[str] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
UpperCAmelCase : List[Any] = 1_0_2_4
UpperCAmelCase : Tuple = 4_0_9_6
UpperCAmelCase : Union[str, Any] = 2_4
UpperCAmelCase : Any = 1_6
# load original model from timm
UpperCAmelCase : Optional[int] = timm.create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Dict = timm_model.state_dict()
UpperCAmelCase : Union[str, Any] = create_rename_keys(_lowercase , _lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , _lowercase , _lowercase )
# load HuggingFace model
UpperCAmelCase : List[Any] = DeiTForImageClassificationWithTeacher(_lowercase ).eval()
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase : List[Any] = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase : List[str] = DeiTImageProcessor(size=_lowercase , crop_size=config.image_size )
UpperCAmelCase : str = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase : Tuple = encoding["""pixel_values"""]
UpperCAmelCase : Optional[int] = model(_lowercase )
UpperCAmelCase : str = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
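# A self-contained sketch of the qkv split performed by read_in_q_k_v above:
# timm stores the attention input projection as one fused (3 * hidden, hidden)
# matrix, which the HF checkpoint splits into query/key/value. The hidden size
# below is a toy value for illustration.
def _split_qkv_sketch():
    import torch

    hidden = 4
    in_proj_weight = torch.randn(3 * hidden, hidden)
    in_proj_bias = torch.randn(3 * hidden)
    query_w = in_proj_weight[:hidden, :]
    key_w = in_proj_weight[hidden : 2 * hidden, :]
    value_w = in_proj_weight[-hidden:, :]
    query_b = in_proj_bias[:hidden]
    key_b = in_proj_bias[hidden : 2 * hidden]
    value_b = in_proj_bias[-hidden:]
    # stacking the slices back reproduces the fused tensors exactly
    assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
    assert torch.equal(torch.cat([query_b, key_b, value_b]), in_proj_bias)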
| 672 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
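# A standalone sketch of the inference path the slow test above verifies,
# assuming network access; the Auto* classes resolve to the ConvNeXt V2
# processor and classification head for this checkpoint.
def _convnextva_inference_sketch(image):
    import torch
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = AutoModelForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
    return model.config.id2label[int(logits.argmax(-1))]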
| 672 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> List[str]:
UpperCAmelCase : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase : Optional[int] = """"""
else:
UpperCAmelCase : Any = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Any = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : str = dct.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Optional[int] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=True ) -> Dict:
UpperCAmelCase : int = ViTConfig()
# patch_size
if model_name[-1] == "8":
UpperCAmelCase : List[str] = 8
# set labels if required
if not base_model:
UpperCAmelCase : Optional[int] = 1_0_0_0
UpperCAmelCase : List[Any] = """huggingface/label-files"""
UpperCAmelCase : int = """imagenet-1k-id2label.json"""
UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : str = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
UpperCAmelCase : Optional[int] = 3_8_4
UpperCAmelCase : List[str] = 1_5_3_6
UpperCAmelCase : int = 1_2
UpperCAmelCase : Union[str, Any] = 6
# load original model from torch hub
UpperCAmelCase : Union[str, Any] = torch.hub.load("""facebookresearch/dino:main""" , _lowercase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Dict = original_model.state_dict()
if base_model:
remove_classification_head_(_lowercase )
UpperCAmelCase : Optional[int] = create_rename_keys(_lowercase , base_model=_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , _lowercase , _lowercase )
# load HuggingFace model
if base_model:
UpperCAmelCase : Optional[int] = ViTModel(_lowercase , add_pooling_layer=_lowercase ).eval()
else:
UpperCAmelCase : Tuple = ViTForImageClassification(_lowercase ).eval()
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by ViTImageProcessor
UpperCAmelCase : Tuple = ViTImageProcessor()
UpperCAmelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = encoding["""pixel_values"""]
UpperCAmelCase : Dict = model(_lowercase )
if base_model:
UpperCAmelCase : Any = original_model(_lowercase )
assert torch.allclose(_lowercase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
UpperCAmelCase : List[Any] = original_model(_lowercase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
a : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
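# The metric above is a thin wrapper over scipy; the docstring example can be
# reproduced directly, so only scipy is required to run this sketch:
def _pearsonr_sketch():
    from scipy.stats import pearsonr

    r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    # r ~ -0.74 (fairly strong negative linear relationship),
    # p ~ 0.15 (not significant at conventional thresholds for n = 5)
    return round(r, 2), round(p, 2)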
| 672 | 1 |
'''simple docstring'''
from collections import defaultdict
def __lowerCamelCase ( _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Dict = first_str.lower().strip()
UpperCAmelCase : Union[str, Any] = second_str.lower().strip()
# Remove whitespace
UpperCAmelCase : Optional[int] = first_str.replace(""" """ , """""" )
UpperCAmelCase : Tuple = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(_lowercase ) != len(_lowercase ):
return False
# Default values for count should be 0
UpperCAmelCase : defaultdict[str, int] = defaultdict(_lowercase )
    # For each position, increment the count for the character from the
    # first string and decrement it for the character from the second
for i in range(len(_lowercase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
a : List[Any] = input("""Enter the first string """).strip()
a : Union[str, Any] = input("""Enter the second string """).strip()
a : Any = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
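# An equivalent check using collections.Counter: counting characters once per
# string and comparing the multisets is the same O(n) idea as the defaultdict
# walk above, in a more compact form. The function name is illustrative.
def check_anagrams_counter(first: str, second: str) -> bool:
    from collections import Counter

    a = first.lower().replace(" ", "")
    b = second.lower().replace(" ", "")
    return Counter(a) == Counter(b)

# check_anagrams_counter("Silent", "Listen")                     -> True
# check_anagrams_counter("This is a string", "Is this a string") -> True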
| 672 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( _lowercase , _lowercase ) -> str | Literal[False]:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : str = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(_lowercase )
def __lowerCamelCase ( _lowercase ) -> list[str]:
UpperCAmelCase : List[str] = []
while True:
UpperCAmelCase : Optional[int] = ["""$"""] * len(_lowercase )
UpperCAmelCase : int = []
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
UpperCAmelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase : Union[str, Any] = """*"""
UpperCAmelCase : Optional[Any] = """*"""
temp.append("""X""" )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
UpperCAmelCase : List[Any] = list(set(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Dict = []
for minterm in minterms:
UpperCAmelCase : List[str] = """"""
for _ in range(_lowercase ):
UpperCAmelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Optional[int] = list(_lowercase )
UpperCAmelCase : Dict = list(_lowercase )
UpperCAmelCase : Dict = 0
for i in range(len(_lowercase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[str]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
UpperCAmelCase : Any = 0
UpperCAmelCase : Optional[Any] = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase : str = j
if count == 1:
UpperCAmelCase : Optional[int] = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = -1
UpperCAmelCase : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase : Union[str, Any] = count_n
UpperCAmelCase : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
UpperCAmelCase : Optional[Any] = 0
def __lowerCamelCase ( _lowercase , _lowercase ) -> list[list[int]]:
UpperCAmelCase : Optional[int] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
UpperCAmelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowercase ):
UpperCAmelCase : List[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
UpperCAmelCase : str = int(input("""Enter the no. of variables\n""" ) )
UpperCAmelCase : List[Any] = [
        int(x )  # minterms are integers; float values would corrupt the binary conversion
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCAmelCase : str = decimal_to_binary(_lowercase , _lowercase )
UpperCAmelCase : Tuple = check(_lowercase )
print("""Prime Implicants are:""" )
print(_lowercase )
UpperCAmelCase : Union[str, Any] = prime_implicant_chart(_lowercase , _lowercase )
UpperCAmelCase : Tuple = selection(_lowercase , _lowercase )
print("""Essential Prime Implicants are:""" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
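# A readable sketch of the core Quine-McCluskey step implemented by the first
# helper above: two implicants merge only if their binary strings differ in
# exactly one position, and that position becomes a '_' don't-care. Returns
# None when the strings are not adjacent on the Boolean hypercube.
def combine_if_adjacent(a: str, b: str):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # zero or more than one differing bit: no merge
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

# combine_if_adjacent("0110", "0100") -> "01_0"
# combine_if_adjacent("0110", "1001") -> None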
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : Union[str, Any] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase_ :
lowercase = PegasusConfig
lowercase = {}
lowercase = 'gelu'
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=5 , A=4 , A=37 , A=0.1 , A=0.1 , A=20 , A=2 , A=1 , A=0 , ) -> str:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Tuple = use_labels
UpperCAmelCase : Tuple = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Any = eos_token_id
UpperCAmelCase : Any = pad_token_id
UpperCAmelCase : Optional[int] = bos_token_id
def _lowercase( self ) -> int:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCAmelCase : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[str] = prepare_pegasus_inputs_dict(A , A , A )
return config, inputs_dict
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : Dict = 20
UpperCAmelCase : Optional[Any] = model_class_name(A )
UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase , UpperCAmelCase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , A , A )
UpperCAmelCase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
UpperCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
UpperCAmelCase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCAmelCase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
UpperCAmelCase : List[Any] = model.decode(A , A )
UpperCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : str = 20
UpperCAmelCase : Optional[int] = model_class_name(A )
UpperCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase , UpperCAmelCase : str = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , A , A )
UpperCAmelCase : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
UpperCAmelCase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
UpperCAmelCase : Any = model.decode(A , A , decoder_attention_mask=A )
UpperCAmelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
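    # Both cache checks above follow the standard Flax incremental-decoding
    # recipe: decode all but the last target token once to fill `past_key_values`,
    # decode the final token on its own, and require the cached logits to agree
    # with a full non-cached forward pass to within 1e-3.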
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , ) -> int:
if attention_mask is None:
UpperCAmelCase : int = np.not_equal(_lowercase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase : List[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase = True
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A )
def _lowercase( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : List[Any] = self._prepare_for_class(A , A )
UpperCAmelCase : int = model_class(A )
@jax.jit
def encode_jitted(A , A=None , **A ):
return model.encode(input_ids=A , attention_mask=A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase : Optional[int] = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase : Any = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase : Union[str, Any] = model_class(A )
UpperCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
UpperCAmelCase : Tuple = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A , A , A ):
return model.decode(
decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase : Optional[int] = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase : Any = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=A )
UpperCAmelCase : str = np.ones((1, 1) )
UpperCAmelCase : int = model(A )
self.assertIsNotNone(A )
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : int = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase : int = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase : Any = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
UpperCAmelCase : Optional[Any] = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
UpperCAmelCase : List[Any] = tokenizer(A , return_tensors="""np""" , truncation=A , max_length=512 , padding=A )
UpperCAmelCase : List[Any] = model.generate(**A , num_beams=2 ).sequences
UpperCAmelCase : List[Any] = tokenizer.batch_decode(A , skip_special_tokens=A )
assert tgt_text == decoded
| 672 |
'''simple docstring'''
a : Tuple = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : str = 0
while number:
        # Increases speed slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
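# Worked example of the five-digit chunking above, for number = 123456:
#   DIGITS_SQUARED[123456 % 100000] == DIGITS_SQUARED[23456] == 4+9+16+25+36 == 90
#   DIGITS_SQUARED[123456 // 100000] == DIGITS_SQUARED[1] == 1
# giving 91, the sum of the squared digits of 123456.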
# Every chain eventually ends at either 89 or 1, and the chain ending at 1 has
# 1 as its only member. Seeding the cache with 58 (the 89-chain member that,
# declared first, needs the fewest iterations to classify all remaining
# members) and with 1 covers both cases up front.
# A flat array replaces the original dictionary to speed up the solution.
a : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a : Optional[Any] = True
a : List[Any] = False
def __lowerCamelCase ( _lowercase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : List[str] = chain(next_number(_lowercase ) )
UpperCAmelCase : Tuple = number_chain
while number < 1_0_0_0_0_0_0_0:
UpperCAmelCase : List[str] = number_chain
number *= 1_0
return number_chain
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , _lowercase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 672 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
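        # The resulting layouts, read directly from the return statements above:
        #   single sequence:   <s> A </s>
        #   pair of sequences: <s> A </s></s> B </s>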
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
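# With this pattern, importing the package stays cheap: the heavy
# framework-specific submodules above are only actually imported the first time
# one of the names listed in `_import_structure` is accessed on the lazy module.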
| 672 | 1 |
'''simple docstring'''
import qiskit
def __lowerCamelCase ( _lowercase , _lowercase ) -> qiskit.result.counts.Counts:
UpperCAmelCase : Any = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase : int = qiskit.QuantumCircuit(_lowercase , _lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCAmelCase : Union[str, Any] = qiskit.execute(_lowercase , _lowercase , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowercase )
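# With no gates applied before the measurement, the qubit stays in |0>, so on an
# ideal simulator the returned Counts is expected to be {'0': 1000} for the
# 1000 shots requested above.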
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 672 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = math.inf , _lowercase = -math.inf , _lowercase = False , _lowercase = 1_0_0 , _lowercase = 0.01 , _lowercase = 1 , ) -> Any:
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Any = search_prob
UpperCAmelCase : Any = start_temperate
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = None
while not search_end:
UpperCAmelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase : List[Any] = current_state
scores.append(_lowercase )
iterations += 1
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
UpperCAmelCase : int = neighbors.pop(_lowercase )
UpperCAmelCase : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase : int = picked_neighbor
else:
UpperCAmelCase : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase : Optional[int] = picked_neighbor
UpperCAmelCase : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase : Optional[int] = True
else:
UpperCAmelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
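# The acceptance rule above is the Metropolis criterion: a non-improving move
# with score delta `change` (<= 0) is still taken with probability
# e**(change / current_temp). A minimal standalone restatement of that rule,
# given here only as an illustrative sketch (it is not used by the search):
def _metropolis_accept(change: float, temperature: float) -> bool:
    """Accept improving moves always, worsening ones with prob e**(change/temp)."""
    if change > 0:
        return True
    return random.random() < (math.e) ** (change / temperature)


# e.g. a move that worsens the score by 1 is accepted with probability
# e**(-1 / 100) ~= 0.99 at temperature 100, but only with e**(-1) ~= 0.37 at
# temperature 1, so exploration shrinks as the system cools.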
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 672 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> float:
UpperCAmelCase : List[Any] = x
UpperCAmelCase : List[str] = y
for step in range(_lowercase ): # noqa: B007
UpperCAmelCase : Optional[Any] = a * a - b * b + x
UpperCAmelCase : Union[str, Any] = 2 * a * b + y
UpperCAmelCase : List[Any] = a_new
        # divergence is guaranteed for every complex number whose absolute
        # value exceeds 2, i.e. once a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
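# Worked examples: the origin never escapes, so get_distance(0, 0, 50) returns
# 49 / 49 == 1.0 and is treated as inside the set; the point (1, 1) already has
# a * a + b * b == 1 + 9 > 4 after one step, so get_distance(1, 1, 50) returns 0.0.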
def __lowerCamelCase ( _lowercase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def __lowerCamelCase ( _lowercase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) )
def __lowerCamelCase ( _lowercase = 8_0_0 , _lowercase = 6_0_0 , _lowercase = -0.6 , _lowercase = 0 , _lowercase = 3.2 , _lowercase = 5_0 , _lowercase = True , ) -> Image.Image:
UpperCAmelCase : Optional[int] = Image.new("""RGB""" , (image_width, image_height) )
UpperCAmelCase : Any = img.load()
# loop through the image-coordinates
for image_x in range(_lowercase ):
for image_y in range(_lowercase ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase : Dict = figure_width / image_width * image_height
UpperCAmelCase : Dict = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase : List[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase : Optional[Any] = get_distance(_lowercase , _lowercase , _lowercase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase : Dict = get_color_coded_rgb(_lowercase )
else:
UpperCAmelCase : int = get_black_and_white_rgb(_lowercase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : Optional[int] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Any = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = VideoToVideoSDPipeline
lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
lowercase = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase = False
# No `output_type`.
lowercase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def _lowercase( self ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : str = CLIPTextModel(A )
UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[Any]:
# 3 frames
UpperCAmelCase : int = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(A ) ).to(A )
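        # the video tensor is laid out as (batch, num_frames, channels, height, width)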
if str(A ).startswith("""mps""" ):
UpperCAmelCase : str = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _lowercase( self ) -> str:
UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : Any = VideoToVideoSDPipeline(**A )
UpperCAmelCase : Tuple = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : List[Any] = """np"""
UpperCAmelCase : str = sd_pipe(**A ).frames
UpperCAmelCase : Optional[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase : List[Any] = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowercase( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _lowercase( self ) -> Any:
pass
def _lowercase( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : List[str] = torch.randn((1, 10, 3, 1024, 576) , generator=A )
UpperCAmelCase : Union[str, Any] = video.to("""cuda""" )
UpperCAmelCase : int = """Spiderman is surfing"""
UpperCAmelCase : Optional[int] = pipe(A , video=A , generator=A , num_inference_steps=3 , output_type="""pt""" ).frames
UpperCAmelCase : List[str] = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
image=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self , A , A=7 , A=3 , A=18 , A=30 , A=400 , A=True , A=None , A=True , A=None , A=True , ) -> Tuple:
UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 20}
UpperCAmelCase : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
UpperCAmelCase : Dict = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Optional[int] = image_size
UpperCAmelCase : Any = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : str = do_resize
UpperCAmelCase : str = size
UpperCAmelCase : int = do_center_crop
UpperCAmelCase : int = crop_size
UpperCAmelCase : List[str] = do_flip_channel_order
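        # `do_flip_channel_order` is the MobileViT-specific preprocessing switch
        # that flips the color channels from RGB to BGR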
def _lowercase( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MobileViTImageProcessor if is_vision_available() else None
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = MobileViTImageProcessingTester(self )
@property
def _lowercase( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """do_center_crop""" ) )
self.assertTrue(hasattr(A , """center_crop""" ) )
self.assertTrue(hasattr(A , """do_flip_channel_order""" ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _lowercase( self ) -> Optional[Any]:
pass
def _lowercase( self ) -> str:
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : int = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowercase( self ) -> int:
# Initialize image_processing
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : List[str] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowercase( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase : List[str] = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 672 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : Any = get_logger()
a : Optional[dict] = None
class UpperCamelCase_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , A=None , A=None , **A ) -> str:
super().__init__(features=A )
import jax
from jaxlib.xla_client import Device
if isinstance(A , A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(A )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
UpperCAmelCase : Optional[int] = device if isinstance(A , A ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` cannot be serialized with either `pickle`
        # or `dill`, so a module-level global is used for the device mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase : List[Any] = str(jax.devices()[0] )
UpperCAmelCase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def _lowercase( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(A ): device for device in jax.devices()}
def _lowercase( self , A ) -> str:
import jax
import jax.numpy as jnp
if isinstance(A , A ) and column:
if all(
isinstance(A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(A , axis=0 )
return column
def _lowercase( self , A ) -> Tuple:
import jax
import jax.numpy as jnp
if isinstance(A , (str, bytes, type(A )) ):
return value
elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase : List[str] = {}
if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
UpperCAmelCase : str = {"""dtype""": jnp.intaa}
else:
UpperCAmelCase : int = {"""dtype""": jnp.intaa}
elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase : Any = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A , PIL.Image.Image ):
UpperCAmelCase : List[str] = np.asarray(A )
        # `jaxlib.xla_extension.Device` cannot be serialized with either `pickle`
        # or `dill`, so a module-level global is used for the device mapping instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase( self , A ) -> Tuple:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(A , """__array__""" ) and not isinstance(A , jax.Array ):
UpperCAmelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
elif isinstance(A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
return self._tensorize(A )
def _lowercase( self , A ) -> Dict:
return map_nested(self._recursive_tensorize , A , map_list=A )
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(A )
UpperCAmelCase : Dict = self.python_features_decoder.decode_row(A )
return self.recursive_tensorize(A )
def _lowercase( self , A ) -> "jax.Array":
UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(A )
UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
UpperCAmelCase : Optional[int] = self.recursive_tensorize(A )
UpperCAmelCase : Any = self._consolidate(A )
return column
def _lowercase( self , A ) -> Mapping:
UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
UpperCAmelCase : List[str] = self.python_features_decoder.decode_batch(A )
UpperCAmelCase : Union[str, Any] = self.recursive_tensorize(A )
for column_name in batch:
UpperCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
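# Usage sketch (an illustration, not part of the formatter): calling
# `ds.with_format("jax")` on a `datasets.Dataset` routes row, column and batch
# extraction through this formatter, so `ds[0]`, `ds["col"]` and `ds[:2]` come
# back as `jax.Array` values placed on the requested (or default) device.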
| 672 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
a : Dict = 5_0_0_0_0_0
a , a : Optional[Any] = os.path.split(__file__)
a : Optional[Any] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def __lowerCamelCase ( _lowercase , **_lowercase ) -> List[Any]:
UpperCAmelCase : Any = dataset.map(**_lowercase )
@get_duration
def __lowerCamelCase ( _lowercase , **_lowercase ) -> int:
UpperCAmelCase : Any = dataset.filter(**_lowercase )
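# `get_duration` is imported from the local `utils` module (not shown here). A
# minimal sketch of what such a timing decorator could look like; the name
# `_timed` is a hypothetical stand-in, not the actual utils implementation:
def _timed(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper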
def __lowerCamelCase ( ) -> List[str]:
UpperCAmelCase : Union[str, Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Optional[int] = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
UpperCAmelCase : Dict = generate_example_dataset(
os.path.join(_lowercase , """dataset.arrow""" ) , _lowercase , num_examples=_lowercase )
UpperCAmelCase : List[Any] = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=_lowercase )
def tokenize(_lowercase ):
return tokenizer(examples["""text"""] )
UpperCAmelCase : Optional[Any] = map(_lowercase )
UpperCAmelCase : Any = map(_lowercase , batched=_lowercase )
UpperCAmelCase : Optional[Any] = map(_lowercase , function=lambda _lowercase : None , batched=_lowercase )
with dataset.formatted_as(type="""numpy""" ):
UpperCAmelCase : Tuple = map(_lowercase , function=lambda _lowercase : None , batched=_lowercase )
with dataset.formatted_as(type="""pandas""" ):
UpperCAmelCase : Any = map(_lowercase , function=lambda _lowercase : None , batched=_lowercase )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
UpperCAmelCase : int = map(_lowercase , function=lambda _lowercase : None , batched=_lowercase )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
UpperCAmelCase : Optional[int] = map(_lowercase , function=lambda _lowercase : None , batched=_lowercase )
UpperCAmelCase : Any = map(_lowercase , function=_lowercase , batched=_lowercase )
UpperCAmelCase : Dict = filter(_lowercase )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_lowercase , """wb""" ) as f:
f.write(json.dumps(_lowercase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 672 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : str = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase : Dict = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase : int = repo.get_issues(state="""open""" )
for issue in open_issues:
        UpperCAmelCase : Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=_lowercase )
UpperCAmelCase : Any = comments[0] if len(_lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 672 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> float:
return base * power(_lowercase , (exponent - 1) ) if exponent else 1
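# e.g. power(3, 4) unrolls to 3 * 3 * 3 * 3 * 1 == 81; once `exponent` reaches 0
# the conditional expression returns the base case 1.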
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
a : str = int(input("""Enter the base: """).strip())
a : str = int(input("""Enter the exponent: """).strip())
a : Dict = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
a : List[str] = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
| 672 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
    def setUp( self ) -> Tuple:
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    def setUp( self ) -> List[Any]:
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase : Dict = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase : Optional[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
UpperCAmelCase : List[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
UpperCAmelCase : Union[str, Any] = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A__ ) )
UpperCAmelCase : Optional[int] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(A__ , A__ )
    def get_tokenizer( self , **kwargs ) -> Any:
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> str:
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ) -> Dict:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) -> Any:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> List[Any]:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase : List[str] = self.get_image_processor()
UpperCAmelCase : Dict = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
UpperCAmelCase : List[str] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A__ )
self.assertIsInstance(processor_fast.tokenizer , A__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A__ )
self.assertIsInstance(processor_fast.image_processor , A__ )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase : int = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
UpperCAmelCase : Optional[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[int] = self.get_image_processor()
UpperCAmelCase : Any = self.get_tokenizer()
UpperCAmelCase : Tuple = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
UpperCAmelCase : int = self.prepare_image_inputs()
UpperCAmelCase : int = image_processor(A__ , return_tensors="""np""" )
UpperCAmelCase : Dict = processor(images=A__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Tuple = self.get_image_processor()
UpperCAmelCase : Any = self.get_tokenizer()
UpperCAmelCase : str = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
UpperCAmelCase : List[str] = """lower newer"""
UpperCAmelCase : str = processor(text=A__ )
UpperCAmelCase : Any = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[str] = self.get_image_processor()
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
UpperCAmelCase : Union[str, Any] = """lower newer"""
UpperCAmelCase : Tuple = self.prepare_image_inputs()
UpperCAmelCase : str = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = self.get_image_processor()
UpperCAmelCase : Dict = self.get_tokenizer()
UpperCAmelCase : Tuple = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
UpperCAmelCase : Dict = self.prepare_image_inputs()
UpperCAmelCase : Dict = processor(images=A__ , visual_prompt=A__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.get_image_processor()
UpperCAmelCase : Optional[Any] = self.get_tokenizer()
UpperCAmelCase : Tuple = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
UpperCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase : Dict = processor.batch_decode(A__ )
UpperCAmelCase : Dict = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
| 700 |
'''simple docstring'''
import math
def is_prime( number ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
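if __name__ == "__main__":
    # Minimal demo (not part of the original module): scanning upward from 14,
    # the next prime found by `next_prime` is 17; passing desc=True scans downward.
    print(next_prime(14))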
| 672 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error( example_no , data_set="train" ):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no , data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 701 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 0 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase_ ( SequenceFeatureExtractor ):
lowercase = ["""input_values""", """attention_mask"""]
    def __init__( self , feature_size = 1 , sampling_rate = 16000 , padding_value = 0.0 , do_normalize = False , num_mel_bins = 80 , hop_length = 16 , win_length = 64 , win_function = "hann_window" , frame_signal_scale = 1.0 , fmin = 80 , fmax = 7600 , mel_floor = 1e-10 , reduction_factor = 2 , return_attention_mask = True , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features( self , one_waveform , ) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
        return log_mel_spec.T
    def __call__( self , audio = None , audio_target = None , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
            if inputs is None:
                return inputs_target
            else:
                inputs["""labels"""] = inputs_target["""input_values"""]
                decoder_attention_mask = inputs_target.get("""attention_mask""" )
                if decoder_attention_mask is not None:
                    inputs["""decoder_attention_mask"""] = decoder_attention_mask
        return inputs
    def _process_audio( self , speech , is_target = False , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , **kwargs , ) -> BatchFeature:
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(one_speech , dtype=np.float32 ) for one_speech in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            encoded_inputs = BatchFeature({"""input_values""": features} )
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"""input_values""": speech} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["""input_values"""]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["""input_values"""] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["""input_values"""] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["""input_values"""] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["""input_values"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_values"""] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
        for name in names:
            if name in output:
                del output[name]
        return output
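# Usage sketch (hedged): upstream this class is SpeechT5FeatureExtractor.
# The waveform below is a hypothetical toy input, not data from this module.
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16000)  # log-mel features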
| 702 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 672 | 0 |
from typing import Any
def __lowerCamelCase ( input_list ) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
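# Illustrative behaviour of the function above (hypothetical inputs):
#   [2, 2, 3]    -> [2]
#   [1, 2, 2, 1] -> [1, 2]   (all tied modes, sorted)
#   []           -> []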
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( PretrainedConfig ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
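# Usage sketch (hedged): upstream this class is FNetConfig, e.g.
#   from transformers import FNetConfig
#   config = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)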
| 672 | 0 |
import os
from collections.abc import Iterator
def __lowerCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
UpperCAmelCase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase , _lowercase ).lstrip("""./""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
return F'''{i * ' '}*''' if i else "\n##"
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(_lowercase )} {new_part.replace('_' , ' ' ).title()}''' )
return new_path
def __lowerCamelCase ( _lowercase = "." ) -> None:
UpperCAmelCase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
UpperCAmelCase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
UpperCAmelCase : Dict = print_path(_lowercase , _lowercase )
UpperCAmelCase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCAmelCase : Dict = F'''{filepath}/{filename}'''.replace(""" """ , """%20""" )
UpperCAmelCase : Optional[int] = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
| 704 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ) -> int:
    operators = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
    operand_stack : Stack[int] = Stack()
    operator_stack : Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 672 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Dict = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class UpperCamelCase_ ( PretrainedConfig ):
lowercase = 'ibert'
def __init__( self , A=30522 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-12 , A=1 , A=0 , A=2 , A="absolute" , A=False , A="none" , **A , ) -> Any:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Any = type_vocab_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Tuple = layer_norm_eps
UpperCAmelCase : int = position_embedding_type
UpperCAmelCase : List[str] = quant_mode
UpperCAmelCase : int = force_dequant
class UpperCamelCase_ ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
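# Usage sketch (hedged): upstream these are IBertConfig and IBertOnnxConfig, e.g.
#   from transformers import IBertConfig
#   config = IBertConfig(quant_mode=True)  # enable I-BERT integer-only quantization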
| 705 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = """resnet101"""
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = """huggingface/label-files"""
        filename = """coco-detection-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = """coco_panoptic""" if is_panoptic else """coco_detection"""
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = """conditional_detr.""" + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                # base model keys already carry the "conditional_detr." prefix; insert ".model"
                val = state_dict.pop(key )
                state_dict["""conditional_detr.model""" + key[len("""conditional_detr""" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["""conditional_detr.""" + key] = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
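# Example invocation (script file name and dump path are illustrative):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50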
| 672 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
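# Example invocation (all paths are hypothetical):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin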
| 706 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : Dict = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler ( ArgumentHandler ):
    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(""",""" ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""" )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase_ ( ChunkPipeline ):
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
                """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
    @property
    def entailment_id( self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("""entail""" ):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
                """ `pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("""multi_class""" , None ) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""" )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["""candidate_labels"""] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
def _lowercase( self , A , A=None , A="This example is {}." ) -> Dict:
UpperCAmelCase : int = self._args_parser(_lowercase , _lowercase , _lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(_lowercase , _lowercase ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_lowercase ) - 1,
**model_input,
}
    def _forward( self , inputs ):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
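    # Layout note: BARThez follows the RoBERTa/CamemBERT special-token
    # convention, so the method above produces
    #   single sequence:    <s> A </s>
    #   pair of sequences:  <s> A </s></s> B </s>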
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 672 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( __A ):
lowercase = 'lilt'
def __init__( self , A=30522 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-12 , A=0 , A="absolute" , A=None , A=4 , A=1024 , **A , ) -> Optional[int]:
super().__init__(pad_token_id=A , **A )
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Union[str, Any] = position_embedding_type
UpperCAmelCase : Dict = classifier_dropout
UpperCAmelCase : str = channel_shrink_ratio
UpperCAmelCase : Any = max_ad_position_embeddings
| 708 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
UpperCAmelCase : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : str = np.zeros((n + 1,) )
UpperCAmelCase : Optional[Any] = ya
UpperCAmelCase : Union[str, Any] = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
)
x += step_size
return y
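# A readable reference for the integrator above (Heun's method, i.e. the
# explicit trapezoidal rule). Parameter names here are illustrative, not
# taken from the obfuscated snippet.
def _heun_reference(
    ode_func: Callable, ya: float, xa: float, x_end: float, step_size: float
) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0], x = ya, xa
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        # trapezoidal corrector: average the slope at both ends of the step
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size
    return y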
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase_ :
lowercase = None
lowercase = None
lowercase = None # sigma(t_i)
@classmethod
def _lowercase( cls ) -> Union[str, Any]:
return cls()
@dataclass
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 42
lowercase = 42
lowercase = 42
class UpperCamelCase_ ( __magic_name__ , __magic_name__ ):
@property
def _lowercase( self ) -> Any:
return True
@register_to_config
def __init__( self , A = 0.0_2 , A = 100 , A = 1.0_0_7 , A = 80 , A = 0.0_5 , A = 50 , ) -> Optional[Any]:
pass
def _lowercase( self ) -> str:
return KarrasVeSchedulerState.create()
def _lowercase( self , A , A , A = () ) -> List[Any]:
UpperCAmelCase : int = jnp.arange(0 , _lowerCAmelCase )[::-1].copy()
UpperCAmelCase : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=_lowerCAmelCase , schedule=jnp.array(_lowerCAmelCase , dtype=jnp.floataa ) , timesteps=_lowerCAmelCase , )
def _lowercase( self , A , A , A , A , ) -> int:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase : Union[str, Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : List[Any] = random.split(_lowerCAmelCase , num=1 )
UpperCAmelCase : int = self.config.s_noise * random.normal(key=_lowerCAmelCase , shape=sample.shape )
UpperCAmelCase : str = sigma + gamma * sigma
UpperCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _lowercase( self , A , A , A , A , A , A = True , ) -> Optional[Any]:
UpperCAmelCase : str = sample_hat + sigma_hat * model_output
UpperCAmelCase : str = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : str = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase )
def _lowercase( self , A , A , A , A , A , A , A , A = True , ) -> int:
UpperCAmelCase : str = sample_prev + sigma_prev * model_output
UpperCAmelCase : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase )
def _lowercase( self , A , A , A , A ) -> str:
raise NotImplementedError()
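# Illustrative sketch of the sigma schedule assembled in `set_timesteps`
# above: a geometric ramp from sigma_max**2 down to sigma_min**2 across the
# reversed timesteps. This mirrors the list comprehension, nothing more.
def _karras_sigma_schedule(sigma_min: float, sigma_max: float, num_inference_steps: int):
    timesteps = jnp.arange(0, num_inference_steps)[::-1]
    return jnp.array(
        [
            sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
            for i in timesteps
        ],
        dtype=jnp.float32,
    )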
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
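# Illustrative numpy sketch of the two scoring modes in `postprocess` above;
# `reshaped_outputs` is assumed to have shape (num_sequences, num_labels,
# num_classes), with `entailment_id` / `contradiction_id` indexing NLI classes.
def _zero_shot_scores(reshaped_outputs, entailment_id, contradiction_id, multi_label):
    if multi_label:
        # softmax over (contradiction, entailment) independently for each label
        pair = reshaped_outputs[..., [contradiction_id, entailment_id]]
        probs = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
        return probs[..., 1]  # probability mass on "entailment"
    # single-label mode: softmax the entailment logit across candidate labels
    entail_logits = reshaped_outputs[..., entailment_id]
    return np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)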
| 672 | 0 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
while a != 0:
        UpperCAmelCase , UpperCAmelCase : Optional[Any] = b % a, a
return b
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
if gcd(lowerCamelCase__ , lowerCamelCase__ ) != 1:
UpperCAmelCase : Any = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(lowerCamelCase__ )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = 1, 0, a
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = 0, 1, m
while va != 0:
UpperCAmelCase : Any = ua // va
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
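# A readable reference for the extended-Euclid inverse above; u1..u3 / v1..v3
# are illustrative stand-ins for the collapsed `ua` / `va` variables.
def _mod_inverse_reference(a: int, m: int) -> int:
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    if u3 != 1:  # u3 ends as gcd(a, m); an inverse exists only when it is 1
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    return u1 % m
# Example: _mod_inverse_reference(7, 26) == 15, since (7 * 15) % 26 == 1.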
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672 | 0 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> Optional[int]:
try:
UpperCAmelCase : int = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
UpperCAmelCase : Union[str, Any] = i
while n % i == 0:
UpperCAmelCase : Optional[int] = n // i
i += 1
return int(_lowerCAmelCase )
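# Worked illustration of the trial division above: 13195 = 5 * 7 * 13 * 29,
# so 29 is returned for n=13195; the Project Euler default 600851475143
# factors as 71 * 839 * 1471 * 6857, giving 6857.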
if __name__ == "__main__":
print(F'''{solution() = }''')
| 711 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 672 | 0 |