"""Tests that the deprecated metric API of `datasets` emits a FutureWarning pointing to `evaluate`."""
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
# Dummy placeholders raised when the optional `speech` backend is unavailable.
# (The original class names were mangled in this dump; the two feature extractors
# below are reconstructed from transformers' dummy speech objects module.)
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
"""Multiprocess-aware logging utilities for `accelerate`."""
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter that logs on the main process only by default, or on all processes in rank order."""

    @staticmethod
    def _should_log(main_process_only):
        """Check if this process should emit the log record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `MultiProcessAdapter`-wrapped logger for `name`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
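
# Usage sketch (added for illustration, not part of the original module). Assuming a
# script launched under `accelerate launch`, the kwargs below are exactly the ones
# popped in MultiProcessAdapter.log above:
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()          # initializes PartialState
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once, on the main process only")
#     logger.info("printed by every rank, in order", main_process_only=False, in_order=True)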
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
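
# Usage sketch (illustrative, not from the original file): with `_LazyModule` installed
# in `sys.modules`, heavy submodules are only imported on first attribute access, e.g.:
#
#     from transformers.models.autoformer import AutoformerConfig  # triggers the lazy import
#     config = AutoformerConfig(prediction_length=24)              # hypothetical parameters
#
# If torch is unavailable, only the configuration entries registered above are importable.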
import re
import string
import numpy as np
import datasets
__snake_case : str = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
__snake_case : Any = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
__snake_case : Optional[Any] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # The exact attribute names were mangled in this dump; the 0/None/0 values
        # suggest disabling n-gram blocking, the forced BOS token and the min length.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
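
# Example invocation (the script file name is hypothetical; the flags are the ones
# defined in parse_args above):
#
#     python run_onnx_exporter.py \
#         --model_name_or_path facebook/bart-base \
#         --num_beams 4 \
#         --max_length 5 \
#         --output_file_path BART.onnx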
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> Dict:
'''simple docstring'''
if hor == 128:
_UpperCAmelCase : List[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
_UpperCAmelCase : Dict = (32, 128, 256)
_UpperCAmelCase : Optional[int] = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
_UpperCAmelCase : Optional[Any] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
_UpperCAmelCase : Union[str, Any] = (32, 64, 128, 256)
_UpperCAmelCase : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
_UpperCAmelCase : Any = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
_UpperCAmelCase : Optional[int] = model.state_dict()
_UpperCAmelCase : Optional[int] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
_UpperCAmelCase : Optional[int] = UNetaDModel(**a__ )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
_UpperCAmelCase : str = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_UpperCAmelCase : Optional[Any] = state_dict.pop(a__ )
hf_value_function.load_state_dict(a__ )
torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , "w" ) as f:
json.dump(a__ , a__ )
def __snake_case ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
_UpperCAmelCase : Optional[Any] = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
_UpperCAmelCase : Optional[int] = model
_UpperCAmelCase : Any = UNetaDModel(**a__ )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
_UpperCAmelCase : Union[str, Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_UpperCAmelCase : List[Any] = state_dict.pop(a__ )
hf_value_function.load_state_dict(a__ )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(a__ , a__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
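
# After conversion, each output directory holds a config.json plus a
# diffusion_pytorch_model.bin pair, so the models should be reloadable roughly as
# sketched below (untested here; paths are the directories written above):
#
#     from diffusers import UNet1DModel
#     unet_hor32 = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")
#     value_fn = UNet1DModel.from_pretrained("hub/hopper-medium-v2/value_function")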
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
while len(SCREAMING_SNAKE_CASE__ ) > 1:
_UpperCAmelCase , _UpperCAmelCase : int = min(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
start.append(SCREAMING_SNAKE_CASE__ )
end.append(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
_lowerCAmelCase : int = input("Enter numbers separated by a comma:\n").strip()
_lowerCAmelCase : List[str] = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
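
# Worked example (added for clarity): each pass moves the current minimum to `start`
# and the current maximum to `end`, so the sort makes O(n) passes over the list,
# i.e. O(n^2) work overall:
#
#     >>> merge_sort([5, 3, 1, 4, 2])
#     [1, 2, 3, 4, 5]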
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
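
# Shape conventions (inferred from the einsum "ibd,nd->ibn" above): `hidden` is
# [seq_len, batch, d_proj] and `target` is [seq_len, batch] of token ids. A minimal,
# untested construction sketch:
#
#     layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=64, d_proj=64, cutoffs=[500])
#     # returns log-probabilities over the full vocab; the loss is attached via add_loss()
#     logprobs = layer(tf.random.normal((10, 2, 64)),
#                      tf.random.uniform((10, 2), maxval=1000, dtype=tf.int64))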
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
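
# Worked example (the classic demo string; added for clarity):
#
#     >>> bwt_transform("^BANANA")
#     {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
#     >>> reverse_bwt("BNN^AAA", 6)
#     '^BANANA'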
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    dice_range = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_range, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
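
# Sanity check (this is Project Euler 205, whose published answer is 0.5731441):
# Peter's nine 4-sided dice beat Colin's six 6-sided dice with probability
#
#     >>> solution()
#     0.5731441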
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab, shifted by `offset`."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
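
# Usage sketch (illustrative; requires the `sentencepiece` package and hub access):
#
#     tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     ids = tokenizer("PEGASUS is mighty!").input_ids   # ends with eos_token_id (1)
#     text = tokenizer.decode(ids)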
def solution(limit: int = 1_000_000) -> int:
    """Sums Euler's totient phi(n) for 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
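
# Sanity check (this is Project Euler 72: counting reduced proper fractions with
# denominator <= 1_000_000 equals the totient sum; the published answer is):
#
#     >>> solution()
#     303963552391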
"""Counts inversions in an array, by brute force and by divide and conquer."""


def count_inversions_bf(arr):
    """Brute force: O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Divide and conquer (merge sort): O(n log n). Returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merges two sorted lists while counting inversions that cross between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = 42
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
@register_to_config
def __init__( self, lowerCAmelCase__ = 6_5536, lowerCAmelCase__ = None, lowerCAmelCase__ = 2, lowerCAmelCase__ = 2, lowerCAmelCase__ = 0, lowerCAmelCase__ = "fourier", lowerCAmelCase__ = True, lowerCAmelCase__ = False, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), lowerCAmelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), lowerCAmelCase__ = "UNetMidBlock1D", lowerCAmelCase__ = None, lowerCAmelCase__ = (32, 32, 64), lowerCAmelCase__ = None, lowerCAmelCase__ = 8, lowerCAmelCase__ = 1, lowerCAmelCase__ = False, ) -> Union[str, Any]:
super().__init__()
snake_case_ = sample_size
# time
if time_embedding_type == "fourier":
snake_case_ = GaussianFourierProjection(
embedding_size=8, set_W_to_weight=lowerCAmelCase__, log=lowerCAmelCase__, flip_sin_to_cos=lowerCAmelCase__)
snake_case_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case_ = Timesteps(
block_out_channels[0], flip_sin_to_cos=lowerCAmelCase__, downscale_freq_shift=lowerCAmelCase__)
snake_case_ = block_out_channels[0]
if use_timestep_embedding:
snake_case_ = block_out_channels[0] * 4
snake_case_ = TimestepEmbedding(
in_channels=lowerCAmelCase__, time_embed_dim=lowerCAmelCase__, act_fn=lowerCAmelCase__, out_dim=block_out_channels[0], )
snake_case_ = nn.ModuleList([])
snake_case_ = None
snake_case_ = nn.ModuleList([])
snake_case_ = None
# down
snake_case_ = in_channels
for i, down_block_type in enumerate(lowerCAmelCase__):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case_ = i == len(lowerCAmelCase__) - 1
snake_case_ = get_down_block(
lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
self.down_blocks.append(lowerCAmelCase__)
# mid
snake_case_ = get_mid_block(
lowerCAmelCase__, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=lowerCAmelCase__, add_downsample=lowerCAmelCase__, )
# up
snake_case_ = list(reversed(lowerCAmelCase__))
snake_case_ = reversed_block_out_channels[0]
if out_block_type is None:
snake_case_ = out_channels
else:
snake_case_ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__):
snake_case_ = output_channel
snake_case_ = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase__) - 1 else final_upsample_channels
)
snake_case_ = i == len(lowerCAmelCase__) - 1
snake_case_ = get_up_block(
lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
self.up_blocks.append(lowerCAmelCase__)
snake_case_ = output_channel
# out
snake_case_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
snake_case_ = get_out_block(
out_block_type=lowerCAmelCase__, num_groups_out=lowerCAmelCase__, embed_dim=block_out_channels[0], out_channels=lowerCAmelCase__, act_fn=lowerCAmelCase__, fc_dim=block_out_channels[-1] // 4, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[UNetaDOutput, Tuple]:
snake_case_ = timestep
if not torch.is_tensor(lowerCAmelCase__):
snake_case_ = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0:
snake_case_ = timesteps[None].to(sample.device)
snake_case_ = self.time_proj(lowerCAmelCase__)
if self.config.use_timestep_embedding:
snake_case_ = self.time_mlp(lowerCAmelCase__)
else:
snake_case_ = timestep_embed[..., None]
snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
snake_case_ = ()
for downsample_block in self.down_blocks:
snake_case_ , snake_case_ = downsample_block(hidden_states=lowerCAmelCase__, temb=lowerCAmelCase__)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case_ = self.mid_block(lowerCAmelCase__, lowerCAmelCase__)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
snake_case_ = down_block_res_samples[-1:]
snake_case_ = down_block_res_samples[:-1]
snake_case_ = upsample_block(lowerCAmelCase__, res_hidden_states_tuple=lowerCAmelCase__, temb=lowerCAmelCase__)
# 5. post-process
if self.out_block:
snake_case_ = self.out_block(lowerCAmelCase__, lowerCAmelCase__)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase__)
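
# Construction sketch (untested, for orientation): the defaults describe an
# unconditional audio-style 1D UNet; `sample` is [batch, in_channels, sample_size]:
#
#     model = UNet1DModel()                    # in_channels=2, sample_size=65536
#     noisy = torch.randn(1, 2, 65_536)
#     out = model(noisy, timestep=10).sample   # same shape as `noisy`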
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Applies the Koch-snowflake construction step `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replaces the middle third of every segment with an outward-pointing bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotates a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
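
# Note on growth (added for clarity): every iteration replaces each segment with four,
# so after n steps the initial 3 segments become 3 * 4**n. iterate(INITIAL_VECTORS, 5)
# therefore yields 3 * 4**5 = 3072 segments, i.e. 3073 points to plot.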
def solution(length: int = 50) -> int:
    """Counts the ways to replace grey tiles in a row of the given length with
    coloured tiles of length 2, 3 or 4, one colour per arrangement (Project Euler 116)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
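
# Sanity check (published Project Euler 116 answer for a row of length 50):
#
#     >>> solution()
#     20492570929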
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {'spearmanr': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results['spearmanr'])\n        -0.7\n        >>> print(round(results['spearmanr_pvalue'], 2))\n        0.19\n"
_CITATION = R"\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
"""simple docstring"""
import functools
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : str = len(UpperCamelCase )
UpperCAmelCase : int = len(UpperCamelCase )
@functools.cache
def min_distance(UpperCamelCase : int , UpperCamelCase : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase : List[str] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , UpperCamelCase ) , 1 + min_distance(UpperCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
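    # Minimal sanity checks for the memoized edit distance defined above.
    print(min_distance_up_bottom("kitten", "sitting"))  # expected: 3
    print(min_distance_up_bottom("intention", "execution"))  # expected: 5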
| 109 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
_UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
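# Example invocation (script and output folder names are illustrative):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large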
| 208 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
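    # Quick self-check: 2**15 = 32768 and its digits sum to 26.
    assert solution(15) == 26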
| 314 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet: UNet2DModel, scheduler: RePaintScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
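# Usage sketch following the diffusers RePaint example (checkpoint id shown for
# illustration; `init_image` and `mask_image` are PIL images you supply):
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=init_image, mask_image=mask_image, num_inference_steps=250)
#   output.images[0].save("inpainted.png")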
| 314 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
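# To run only these tests, point pytest at this module, e.g. (path shown is the
# conventional transformers location and may differ in your checkout):
#   pytest tests/models/resnet/test_modeling_tf_resnet.py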
| 33 |
"""simple docstring"""
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) brute force: scan the rest of the list for each element."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute force, but with enumerate/slicing instead of index arithmetic."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, scanning from the right."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
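    # Cross-check: all three implementations must agree with the expected list
    # defined at the top of the module.
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect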
| 33 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below ``ceiling`` that is the sum of the
    most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 23 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
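    # Minimal demo of the scorer above; the result is an integer in [0, 12],
    # with natural English text tending toward the upper end.
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))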
| 23 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a list of PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a list of PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a list of PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
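# To run only these processor tests (conventional transformers path shown;
# adjust to your checkout):
#   pytest tests/models/sam/test_processor_sam.py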
| 259 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
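# Example (requires network access): the default olid "isbn/0140328726"
# corresponds to Roald Dahl's "Matilda", so
# summarize_book(get_openlibrary_data()) returns its title, authors and ISBNs.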
if __name__ == "__main__":
import doctest
doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 259 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
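# Standalone usage sketch of the scheduler under test (shapes illustrative;
# `model_output` and `sample` would come from a diffusion model):
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       sample = scheduler.step(model_output, t, sample).prev_sample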
| 362 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
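# Usage sketch (checkpoint id from the CLIPSeg release, shown for illustration;
# `image` is a PIL image you supply):
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")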
| 241 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 321 |
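A hedged sketch of running the pipeline exercised by the tests above; loading the checkpoint needs network access, and the real diffusers class is UNet2DModel (this dump writes it as UNetaDModel):
# Illustrative only, mirrors the slow test above.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
image = pndm(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(image.shape)  # (1, 32, 32, 3)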
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source: float , target: float ) -> bool:
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir: str ) -> None:
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir , """README.md""" )
    assert os.path.exists(datasets_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 192 | 0 |
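The 1 % relative-tolerance helper above behaves much like math.isclose with rel_tol=0.01 for close, positive values; isclose scales by the larger magnitude rather than by target, so the two can disagree only near the boundary. A quick check:
import math

source, target = 2351563, 2355000
assert (abs(source - target) / target) < 0.01
assert math.isclose(source, target, rel_tol=0.01)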
"""simple docstring"""
from math import loga
def _SCREAMING_SNAKE_CASE (a : int ):
    if not isinstance(a , int ):
        raise TypeError('Input value must be a \'int\' type' )
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309 |
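Worked example of the a & -a trick used above: in two's complement, a & -a isolates the lowest set bit, and log2 of that power of two is the bit's index.
from math import log2

for a in (1, 2, 12, 40):
    print(a, bin(a), "->", int(log2(a & -a)))
# 12 = 0b1100 -> lowest set bit is 0b100 -> index 2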
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    def __post_init__( self ):
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split( Enum ):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class GlueDataset( Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ):
        """simple docstring"""
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        """simple docstring"""
        return self.features[i]
    def get_labels( self ):
        """simple docstring"""
        return self.label_list
| 309 | 1 |
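A hedged sketch of how the (deprecated) dataset above was typically constructed; it needs the GLUE tsv files on disk, so the path is a placeholder:
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
print(len(train_dataset), train_dataset.get_labels())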
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"""help""": """The output directory where the model will be written."""} , )
    encoder_model_name_or_path: str = field(
        metadata={
            """help""": (
                """The encoder model checkpoint for weights initialization."""
                """Don't set if you want to train an encoder model from scratch."""
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            """help""": (
                """The decoder model checkpoint for weights initialization."""
                """Don't set if you want to train a decoder model from scratch."""
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 92 |
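Once the script above has run, the composed checkpoint can be reloaded for captioning-style fine-tuning; a hedged sketch with an illustrative output path:
from transformers import AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2")  # placeholder output_dir
tokenizer = AutoTokenizer.from_pretrained("./vit-gpt2")
image_processor = AutoImageProcessor.from_pretrained("./vit-gpt2")
print(model.config.decoder_start_token_id, model.config.pad_token_id)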
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ) -> int:
        '''simple docstring'''
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ) -> Optional[Any]:
        '''simple docstring'''
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ) -> int:
        '''simple docstring'''
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
        sequence = re.sub(r"""<.*?>""" , """""" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.tokenajson(sequence )
        return sequence["answer"]
| 252 | 0 |
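A hedged usage sketch of the tool above; instantiating it downloads the Donut checkpoint, the document path is a placeholder, and the positional call signature is an assumption based on the encode method:
from PIL import Image
from transformers.tools import DocumentQuestionAnsweringTool

tool = DocumentQuestionAnsweringTool()  # pulls naver-clova-ix/donut-base-finetuned-docvqa
answer = tool(Image.open("invoice.png"), "What is the total amount?")
print(answer)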
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        max_decoder_length = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = """google/pegasus-xsum"""
    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation( self ):
        self._assert_generated_batch_equal_expected()
| 354 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        #  parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        #  the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        #  in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 20 | 0 |
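A minimal sketch of using the pipeline defined above; the checkpoint and image URL are illustrative and require network access:
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# e.g. [{"generated_text": "two cats sleeping on a couch"}]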
def gray_code(bit_count: int ) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError('The given input must be positive' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int ) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
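# Worked example (using the function names as fixed above): adjacent Gray
# codes differ in exactly one bit, the defining property of the sequence.
#   gray_code_sequence_string(2) -> ['00', '01', '11', '10']
#   gray_code(2)                 -> [0, 1, 3, 2]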
| 140 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-en-ro""": (
            """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
        ),
        """facebook/mbart-large-cc25""": (
            """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
        """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-en-ro""": 1024,
    """facebook/mbart-large-cc25""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ):
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 140 | 1 |
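A hedged sketch of the translation workflow this tokenizer supports; the checkpoint download makes it illustrative rather than self-contained:
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Source sequences end with </s> followed by the language code, per
# set_src_lang_special_tokens above.
print(batch.input_ids[0, -2:])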
def binary_or(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
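# Worked example of the zfill alignment above: 25 -> 0b11001 and 32 ->
# 0b100000; padded to six columns and OR-ed column by column this gives
# 0b111001 = 57, matching 25 | 32.
print(binary_or(25, 32))  # 0b111001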
| 99 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'''image''': image, '''question''': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['''image'''] )
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(scores , ids )]
| 99 | 1 |
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float , current: float , voltage_angle: float , current_angle: float ) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
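# Worked example: 100 V at +30 degrees times 5 A at -30 degrees gives
# 500 VA at 0 degrees, since complex multiplication multiplies the
# magnitudes and adds the angles.
print(apparent_power(100, 5, 30, -30))  # ~ (500+0j), up to floating-point error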
| 23 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=1000 ):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 23 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="ConvNext does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason="ConvNext does not use feedforward chunking" )
    def test_feed_forward_chunking( self ):
        pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    '''simple docstring'''
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
| 360 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str] ) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
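# For comparison, the standard library enumerates the same n! = 24 orderings
# lazily, without the explicit index_used bookkeeping above:
from itertools import permutations

print(len(list(permutations([3, 1, 2, 4]))))  # 24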
| 49 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''speech''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''speech''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
| 156 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time",
            addition_time_embed_dim=8, transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
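    # Note (added): `projection_class_embeddings_input_dim=80` above follows from SDXL's
    # extra conditioning: 6 micro-conditioning values times addition_time_embed_dim (8),
    # plus the pooled text embedding width (projection_dim=32), so 6 * 8 + 32 = 80.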
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
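# Sketch (added for illustration): the fast tests above exercise the same user-facing
# API a script would call, roughly:
#   pipe = StableDiffusionXLImg2ImgPipeline(**components).to("cpu")
#   out = pipe(prompt="a painting", image=init_image, strength=0.75, num_inference_steps=2)
#   result = out.images[0]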
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 284 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
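# Note (added for clarity): nothing in `_import_structure` is imported eagerly. At
# runtime, `_LazyModule` (bottom of the file) resolves a name such as `DistilBertModel`
# by importing `.modeling_distilbert` on first attribute access; the `TYPE_CHECKING`
# branch below only exists so static type checkers see the same names.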
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
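# Note (added for clarity): each guarded block above substitutes dummy placeholder
# objects when an optional backend (torch, flax, scipy, torchsde) is missing, so
# `from diffusers.schedulers import <X>` never fails at import time; the missing
# dependency only surfaces as an error when the placeholder is actually instantiated.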
| 11 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
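    # Example usage (added): X_L = 2*pi*f*L, so 35 mH at 1 kHz gives approx. 219.91 ohms.
    print(ind_reactance(inductance=35e-3, frequency=1e3, reactance=0))  # {'reactance': ~219.91}
    print(ind_reactance(inductance=0, frequency=1e3, reactance=219.91))  # {'inductance': ~0.035}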
| 105 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
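# Note (added): `slow_tokenizer_class` is how a fast tokenizer advertises its slow
# counterpart, so conversion utilities know which Python implementation to fall back to.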
| 131 |
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
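# Derivation (added for clarity): write the progression as x = y + d, z = y - d, so
#   x^2 - y^2 - z^2 = (y + d)^2 - y^2 - (y - d)^2 = y * (4d - y) = n.
# For y = first_term a divisor of n, d = (y + n/y) / 4 must be a whole number (the
# `% 4` check), and the constraints z > 0 and 4d > y give the two inequality tests.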
| 131 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by upper-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably so the cipher fits in a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
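# Example round trip (added for illustration), using the classic Wikipedia key:
if __name__ == "__main__":
    key = "playfair example"
    encrypted = encode("Hide the gold in the tree stump", key)
    print(encrypted)               # expected: BMODZBXDNABEKUDMUIXMMOUVIF
    print(decode(encrypted, key))  # HIDETHEGOLDINTHETREXESTUMP (X-padded uppercase)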
| 197 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True,
                trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
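# Sketch (added for illustration): outside the test suite, the same objects drive a
# standalone benchmark run, e.g.
#   args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True,
#                                       sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   TensorFlowBenchmark(args).run()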
| 175 | 0 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
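# Truth table (added): nand_gate(a, b) = not (a and b), so the four prints above
# output 1, 1, 1, 0.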
| 353 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__A = "src/transformers"
# Matches is_xxx_available()
__A = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__A = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__A = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__A = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__A = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__A = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__A = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__A = re.compile(r"^\s*try:")
# Catches a line with else:
__A = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 108 | 0 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
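    # Note (added): `stretch` builds a cumulative-histogram lookup table (`last_list`)
    # and remaps every pixel through it, i.e. histogram equalization of a grayscale image.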
| 59 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
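# Example (added for illustration): building a default config and overriding one field.
# config = MegatronBertConfig(num_hidden_layers=12)
# assert config.hidden_size == 1024 and config.num_hidden_layers == 12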
| 59 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
        intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
        max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
        bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 217 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = '''▁'''
_lowercase = {'''vocab_file''': '''spiece.model'''}
_lowercase = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
_lowercase = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = VOCAB_FILES_NAMES
_lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple ,A_ : Optional[Any] ,A_ : Optional[Any]="</s>" ,A_ : Union[str, Any]="<unk>" ,A_ : Dict=[] ,A_ : Optional[Dict[str, Any]] = None ,**A_ : str ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_ ,unk_token=A_ ,additional_special_tokens=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict[str, int]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Tuple:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : int ,A_ : str ) -> str:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[Any] ) -> Optional[Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ) -> Dict:
if index < self.sp_model.get_piece_size():
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ) -> Dict:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 74 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
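    # Quick check (added): the integral of x**2 from 0 to 1 is 1/3; ten trapezoids
    # give approximately 0.335, within the O(h**2) error of the exact value.
    assert abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 5e-3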
| 11 | 0 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 355 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law, V = I*R: given two of voltage, current, and resistance (the
    third passed as 0), return the missing quantity."""
    if (voltage, current, resistance).count(0) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
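    # Example usage (added): with I = 2 A through R = 6 ohms, V = I*R = 12 V.
    print(ohms_law(voltage=0, current=2, resistance=6))   # {'voltage': 12.0}
    print(ohms_law(voltage=12, current=0, resistance=6))  # {'current': 2.0}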
| 234 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96,
        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
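        # Worked example (added): with the defaults embed_dim=96 and 4 stages,
        # hidden_size = 96 * 2**(4 - 1) = 768, the width VisionEncoderDecoderModel sees.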
| 159 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 38 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
snake_case__ : List[str] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
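# Example (added for illustration): checking one pinned dependency by hand. The spec
# string comes from the `deps` table, e.g. dep_version_check("tqdm") enforces a pin
# such as "tqdm>=4.27".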
| 351 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig( PretrainedConfig ):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , )-> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs )-> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    model_type = "pix2struct_vision_model"
    def __init__(self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , )-> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs )-> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , )-> None:
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs(cls , text_config: PixaStructTextConfig , vision_config: PixaStructVisionConfig , **kwargs )-> "PixaStructConfig":
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict(self )-> dict:
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
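# Composition sketch (assumes the default hyper-parameters above are acceptable):
if __name__ == "__main__":
    _text_cfg = PixaStructTextConfig()
    _vision_cfg = PixaStructVisionConfig()
    _cfg = PixaStructConfig.from_text_vision_configs(_text_cfg , _vision_cfg )
    print(_cfg.to_dict()["model_type"] )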
| 250 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ) -> CodeGenTokenizer:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ) -> CodeGenTokenizerFast:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> tuple:
        """simple docstring"""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )

        sequence = 'lower newer'

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ) -> None:
        """simple docstring"""
        pass
    def test_padding( self , max_length=15 ) -> None:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )

                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )

                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='max_length' , )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )

                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='max_length' , )
    def test_padding_if_pad_token_set_slow( self ) -> None:
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )

        # Simple input
        s = 'This is a simple input'
        sa = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        pa = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s , padding='max_length' , max_length=30 , return_tensors='np' )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='np' )
        out_p = tokenizer(*p , padding='max_length' , max_length=60 , return_tensors='np' )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow( self ) -> None:
        """simple docstring"""
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )

        s = 'This is a simple input'
        sa = ['This is a simple input 1', 'This is a simple input 2']

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s )
        out_sa = tokenizer(sa )

        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )

        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )

        self.assertEqual(decode_s.split()[0] , bos_token )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
    def test_truncation( self ) -> None:
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )

        text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b:\n result = a\nelse:\n result = b'

        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ['^#', re.escape('<|endoftext|>' ), "^'''", '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
| 222 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
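# Dispatch sketch: each sub-command below registers an argparse sub-parser and sets a
# `func` default, so e.g. `accelerate launch train.py` routes to the launch handler.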
def main():
    '''simple docstring'''
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )

    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 187 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "markuplm"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
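# Usage sketch (the defaults above mirror the microsoft/markuplm-base checkpoint):
if __name__ == "__main__":
    _cfg = MarkupLMConfig()
    print(_cfg.model_type , _cfg.max_depth , _cfg.xpath_unit_hidden_size )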
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False)
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs)

    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)

    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
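# Usage sketch (assumption: a dummy 256x256 RGB array stands in for a real image):
if __name__ == "__main__":
    _processor = UpperCamelCase()
    _dummy = np.zeros((256, 256, 3) , dtype=np.uint8 )
    _batch = _processor.preprocess(_dummy , return_tensors='np' )
    print(_batch['pixel_values'].shape )  # (1, 3, 224, 224)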
| 345 | 0 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string ) -> bool:
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 56 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate: float , decay_schedule_fn: Callable , warmup_steps: int , power: float = 1.0 , name: str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__( self , step ):
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )

    def get_config( self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
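# Usage sketch (hypothetical schedule lengths):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01)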
class AdamWeightDecay( Adam ):
    def __init__( self , learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , beta_1: float = 0.9 , beta_2: float = 0.999 , epsilon: float = 1e-7 , amsgrad: bool = False , weight_decay_rate: float = 0.0 , include_in_weight_decay: Optional[List[str]] = None , exclude_from_weight_decay: Optional[List[str]] = None , name: str = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config( cls , config ):
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )

    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )

    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()

    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )

    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )

    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )

    def get_config( self ):
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config

    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator(object):
    def __init__( self ):
        self._gradients = []
        self._accum_steps = None

    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()

    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(gradients )}" )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )

    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
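# Accumulation-loop sketch (assumes `grads` comes from a tf.GradientTape per micro-batch):
# accumulator = GradientAccumulator()
# for grads in micro_batch_gradients:
#     accumulator(grads)
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()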
| 56 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class __snake_case ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 255 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['height'], size['width'])
        else:
            raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}')
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs)

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs)

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)

    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = None , do_center_crop: Optional[bool] = None , crop_size: Optional[Dict[str, int]] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 353 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)

OPTS = None
def parse_args() ->argparse.Namespace:
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
    parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
    parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
    parser.add_argument(
        '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
    parser.add_argument(
        '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
    parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
    parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
    parser.add_argument('--verbose' , '-v' , action='store_true' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()

def make_qid_to_has_ans( dataset ) ->dict:
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['answers']['text'] )
    return qid_to_has_ans
def normalize_answer( s ) ->str:
    def remove_articles( text ):
        return ARTICLES_REGEX.sub(' ' , text )

    def white_space_fix( text ):
        return " ".join(text.split() )

    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower( text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )

def get_tokens( s ) ->list:
    if not s:
        return []
    return normalize_answer(s ).split()

def compute_exact( a_gold , a_pred ) ->int:
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )

def compute_fa( a_gold , a_pred ) ->float:
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
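# Worked example: gold "the cat sat" vs. pred "cat sat down" -> after normalization the
# article "the" is stripped, the token overlap is {"cat", "sat"}, so precision = 2/3,
# recall = 2/2, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.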
def get_raw_scores( dataset , preds ) ->tuple:
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(F'Missing prediction for {qid}' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores

def apply_no_ans_threshold( scores , na_probs , qid_to_has_ans , na_prob_thresh ) ->dict:
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict( exact_scores , fa_scores , qid_list=None ) ->collections.OrderedDict:
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values() ) / total),
                ('f1', 100.0 * sum(fa_scores.values() ) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ('total', total),
            ] )

def merge_eval( main_eval , new_eval , prefix ) ->None:
    for k in new_eval:
        main_eval[F'{prefix}_{k}'] = new_eval[k]

def plot_pr_curve( precisions , recalls , out_image , title ) ->None:
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
    plt.xlabel('Recall' )
    plt.ylabel('Precision' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ) ->dict:
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}

def run_precision_recall_analysis( main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ) ->None:
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
    merge_eval(main_eval , pr_exact , 'pr_exact' )
    merge_eval(main_eval , pr_fa , 'pr_f1' )
    merge_eval(main_eval , pr_oracle , 'pr_oracle' )

def histogram_na_prob( na_probs , qid_list , image_dir , name ) ->None:
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel('Model probability of no-answer' )
    plt.ylabel('Proportion of dataset' )
    plt.title(F'Histogram of no-answer probability: {name}' )
    plt.savefig(os.path.join(image_dir , F'na_prob_hist_{name}.png' ) )
    plt.clf()
def find_best_thresh( preds , scores , na_probs , qid_to_has_ans ) ->tuple:
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh

def find_all_best_thresh( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ) ->None:
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main() ->None:
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json['data']
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , 'HasAns' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , 'NoAns' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
    if OPTS.out_file:
        with open(OPTS.out_file , 'w' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 203 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]

def decode( encoded: list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )

def main() -> None:
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " , encoded )
    print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
main()
| 148 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    # Compare two initializers while ignoring their (unique) names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res

def _node_replace_input_with( node_proto , name , new_name ):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )

def _graph_replace_input_with( graph_proto , name , new_name ):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )

def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def UpperCamelCase__ ( lowercase__ : Optional[int] ):
snake_case : List[str] = os.path.dirname(lowercase__ )
snake_case : Any = os.path.basename(lowercase__ )
snake_case : Optional[int] = onnx.load(os.path.join(lowercase__ , lowercase__ ) )
snake_case : Optional[Any] = list(model.graph.initializer )
snake_case : int = set()
snake_case : Any = {}
snake_case : Optional[Any] = []
snake_case : str = 0
for i in range(len(lowercase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowercase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowercase__ )
dup_set.add(lowercase__ )
snake_case : Union[str, Any] = inits[j].data_type
snake_case : Tuple = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , lowercase__ )
total_reduced_size += mem_size
snake_case : Tuple = inits[i].name
snake_case : Optional[Any] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowercase__ )
else:
snake_case : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
snake_case : Tuple = sorted(lowercase__ )
_remove_dup_initializers_from_model(lowercase__ , lowercase__ , lowercase__ )
snake_case : Optional[Any] = "optimized_" + model_file_name
snake_case : Tuple = os.path.join(lowercase__ , lowercase__ )
onnx.save(lowercase__ , lowercase__ )
return new_model
| 148 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
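# Utility that reports per-job runtimes for a GitHub Actions workflow run via the GitHub REST API.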
def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) for a single job."""
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        # the endpoint is paginated: fetch the remaining pages, 100 jobs at a time
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'{k}: {v["duration"]}') | 153 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
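# Slow GPU integration tests for the Stable Diffusion k-diffusion pipeline.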
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 153 | 1 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
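# Pipeline that samples frames from a video (via decord) and classifies them with a video model.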
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] | 86 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
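# Composite configuration plus ONNX export configs for vision encoder-decoder models.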
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = 'vision-encoder-decoder'
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"A configuraton of type {self.model_type} cannot be instantiated because "
                F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["attention_mask"] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["encoder_hidden_states"] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("""input_ids""" )
        common_inputs["attention_mask"] = dummy_input.pop("""attention_mask""" )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass
    def get_encoder_config(self, encoder_config) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
    def get_decoder_config(self, encoder_config, decoder_config, feature="default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 344 | 0 |
import random
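# One-time-pad style cipher: every character is combined with a freshly drawn random key value.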
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character i as (ord(i) + k) * k for a fresh random key k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: recover chr((c - k**2) / k) for each pair."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
    print(c, k)
    print(Onepad().decrypt(c, k)) | 365 |
from collections import deque
from .hash_table import HashTable
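# Hash table variant that resolves collisions by chaining values in per-slot deques.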
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]
    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data) | 256 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
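# Capitalizes the first character of a sentence via an explicit lowercase-to-uppercase mapping.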
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 237 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
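# Unit tests for DPMSolverSDEScheduler across schedules, prediction types, and devices.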
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 237 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
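# Tests that TvltProcessor correctly combines its image processor and audio feature extractor.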
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg="`processor` and `image_processor`+`feature_extractor` model input names do not match", )
| 137 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
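# Model tester plus unit and integration tests for the LiLT layout-aware language model.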
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
                 num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 137 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
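# XLM-RoBERTa text encoder with a learned projection head on top of the hidden states.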
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 238 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[str] = logging.get_logger(__name__)
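# Image processor for document images: resize/rescale/normalize plus optional Tesseract OCR.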
def normalize_box(box, width, height):
    return [
        int(10_00 * (box[0] / width)),
        int(10_00 * (box[1] / height)),
        int(10_00 * (box[2] / width)),
        int(10_00 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    # apply OCR on the full image
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 204 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
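# Processor that pairs a tokenizer with speaker-embedding voice presets (Bark-style text-to-speech).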
class BarkProcessor(ProcessorMixin):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']
    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop("""subfolder""", None), cache_dir=kwargs.pop("""cache_dir""", None), force_download=kwargs.pop("""force_download""", False), proxies=kwargs.pop("""proxies""", None), resume_download=kwargs.pop("""resume_download""", False), local_files_only=kwargs.pop("""local_files_only""", False), use_auth_token=kwargs.pop("""use_auth_token""", None), revision=kwargs.pop("""revision""", None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub=False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, """v2""" ), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""], speaker_embeddings_directory, f'''{prompt_key}_{key}''' ), voice_preset[key], allow_pickle=False, )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'''{prompt_key}_{key}.npy''' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), """w""" ) as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""", """/""" ), voice_preset_paths[key], subfolder=kwargs.pop("""subfolder""", None), cache_dir=kwargs.pop("""cache_dir""", None), force_download=kwargs.pop("""force_download""", False), proxies=kwargs.pop("""proxies""", None), resume_download=kwargs.pop("""resume_download""", False), local_files_only=kwargs.pop("""local_files_only""", False), use_auth_token=kwargs.pop("""use_auth_token""", None), revision=kwargs.pop("""revision""", None), )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/" ), voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(""".npz""" ):
                    voice_preset = voice_preset + """.npz"""
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="""max_length""", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
| 360 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
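# Processor pairing the Encodec feature extractor with a T5 tokenizer for audio+text inputs (MusicGen-style).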
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = """EncodecFeatureExtractor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["""input_values"""]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["""padding_mask"""]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("""audio""", None)
        padding_mask = kwargs.pop("""padding_mask""", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), """constant""", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 261 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
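# SentencePiece-based CamemBERT tokenizer with fairseq-compatible special-token index offsets.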
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def lowercase ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase ( self : List[Any] ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowercase ( self : Any ):
_snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self : List[str] , _lowerCamelCase : str ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def lowercase ( self : Tuple , _lowerCamelCase : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCamelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCamelCase )
def lowercase ( self : List[Any] , _lowerCamelCase : Optional[int] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def lowercase ( self : int , tokens : List[str] ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __getstate__( self : str ):
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : Dict , _lowerCamelCase : int ):
_snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
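# Hedged sketch of the fairseq id-offset trick used by the tokenizer above:
# a handful of control tokens occupy the first ids, every sentencepiece id is
# shifted past them, and sentencepiece's unk (id 0) is remapped onto the
# reserved <unk> slot. Purely illustrative, no real vocabulary involved.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_model_id(sp_id: int) -> int:
    return fairseq_tokens_to_ids["<unk>"] if sp_id == 0 else sp_id + fairseq_offset

print(sp_id_to_model_id(0), sp_id_to_model_id(42))  # 3 46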
| 288 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be an integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be an exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''')
    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=1_00_00)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
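# Quick numpy sanity check (independent of qiskit) of why the simulated
# counts above come out near-uniform: the QFT matrix has entries
# F[j, k] = omega**(j * k) / sqrt(N) with omega = exp(2*pi*i / N), and
# applied to |0...0> it yields a uniform superposition over all basis states.
import numpy as np

n_check = 3
N = 2 ** n_check
omega = np.exp(2j * np.pi / N)
F = np.array([[omega ** (j * k) for k in range(N)] for j in range(N)]) / np.sqrt(N)
state = np.zeros(N, dtype=complex)
state[0] = 1.0
print(np.round(np.abs(F @ state) ** 2, 3))  # eight equal probabilities of 0.125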
| 288 | 1 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Tuple = ['image_processor', 'tokenizer']
lowercase : Union[str, Any] = 'AutoImageProcessor'
lowercase : Dict = 'AutoTokenizer'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = kwargs.pop("""feature_extractor""" )
UpperCamelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.image_processor
UpperCamelCase : int = False
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = kwargs.pop("""images""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = kwargs.pop("""text""" , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : List[Any] = args[0]
UpperCamelCase : Optional[int] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
UpperCamelCase : Optional[Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None:
UpperCamelCase : str = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase : str = encodings["""input_ids"""]
return inputs
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@contextmanager
def a_ ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
UpperCamelCase : Tuple = True
UpperCamelCase : Dict = self.tokenizer
yield
UpperCamelCase : Optional[Any] = self.image_processor
UpperCamelCase : Dict = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        """Convert a sequence of <s_key>...</s_key> tokens into a (possibly nested) dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf'</s_{key}>' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def a_ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor_class
@property
def a_ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor
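# Minimal regex sketch of the tag-to-dict parsing idea behind the method
# above (hypothetical input, leaf values only, no nesting or added vocab):
import re

tag_sequence = "<s_total>12.50</s_total><s_menu>coffee<sep/>cake</s_menu>"
parsed = {}
for key, body in re.findall(r"<s_(.*?)>(.*?)</s_\1>", tag_sequence):
    leaves = [leaf.strip() for leaf in body.split("<sep/>")]
    parsed[key] = leaves[0] if len(leaves) == 1 else leaves
print(parsed)  # {'total': '12.50', 'menu': ['coffee', 'cake']}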
| 27 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A : Optional[Any] = 16
__A : str = 32
def A_ ( snake_case_ : Accelerator ,snake_case_ : int = 1_6 ):
'''simple docstring'''
UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase : Optional[int] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(snake_case_ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=snake_case_ ,max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase : Optional[Any] = datasets.map(
snake_case_ ,batched=snake_case_ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase : str = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase : Optional[Any] = 1_6
elif accelerator.mixed_precision != "no":
UpperCamelCase : Any = 8
else:
UpperCamelCase : Optional[Any] = None
return tokenizer.pad(
snake_case_ ,padding="""longest""" ,max_length=snake_case_ ,pad_to_multiple_of=snake_case_ ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
UpperCamelCase : str = DataLoader(
tokenized_datasets["""train"""] ,shuffle=snake_case_ ,collate_fn=snake_case_ ,batch_size=snake_case_ )
UpperCamelCase : Dict = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=snake_case_ ,collate_fn=snake_case_ ,batch_size=snake_case_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A : int = mocked_dataloaders # noqa: F811
def A_ ( snake_case_ : Tuple ,snake_case_ : Dict ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,snake_case_ ) == "1":
UpperCamelCase : Union[str, Any] = 2
# New Code #
UpperCamelCase : Dict = int(args.gradient_accumulation_steps )
UpperCamelCase : List[Any] = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase : str = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=snake_case_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase : Union[str, Any] = config["""lr"""]
UpperCamelCase : int = int(config["""num_epochs"""] )
UpperCamelCase : int = int(config["""seed"""] )
UpperCamelCase : List[Any] = int(config["""batch_size"""] )
UpperCamelCase : Optional[int] = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(snake_case_ )
UpperCamelCase , UpperCamelCase : Dict = get_dataloaders(snake_case_ ,snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase : Tuple = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase : List[Any] = AdamW(params=model.parameters() ,lr=snake_case_ )
# Instantiate scheduler
UpperCamelCase : str = get_linear_schedule_with_warmup(
optimizer=snake_case_ ,num_warmup_steps=1_0_0 ,num_training_steps=(len(snake_case_ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
with LocalSGD(
accelerator=snake_case_ ,model=snake_case_ ,local_sgd_steps=snake_case_ ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case_ ):
UpperCamelCase : Optional[Any] = model(**snake_case_ )
UpperCamelCase : Optional[int] = output.loss
accelerator.backward(snake_case_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase : Any = model(**snake_case_ )
UpperCamelCase : Tuple = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case_ ,references=snake_case_ ,)
UpperCamelCase : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' ,snake_case_ )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=snake_case_ ,default=snake_case_ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=snake_case_ ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=snake_case_ ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
UpperCamelCase : Dict = parser.parse_args()
UpperCamelCase : List[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(snake_case_ ,snake_case_ )
if __name__ == "__main__":
main()
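# Conceptual sketch of what LocalSGD does every `local_sgd_steps` batches:
# each worker trains independently, then parameters are averaged across
# workers and every worker continues from the synchronized average. Shown
# with plain tensors instead of a real process group.
import torch

worker_params = [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])]
averaged = torch.stack(worker_params).mean(dim=0)
for p in worker_params:
    p.copy_(averaged)
print(worker_params)  # [tensor([2., 3.]), tensor([2., 3.])]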
| 27 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
snake_case__ : Optional[int] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 'sgugger/tiny-distilbert-classification'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
snake_case__ : Dict = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = 'sshleifer/tiny-gpt2'
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[int] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
snake_case__ : Union[str, Any] = None
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : str = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[str] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tinier_bart'
snake_case__ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tinier_bart'
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__UpperCamelCase , 'train_time.csv' ) , env_info_csv_file=os.path.join(__UpperCamelCase , 'env.csv' ) , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'env.csv' ) ).exists() )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase , 'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'current' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , 'log.txt' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'log.txt' ) ).exists() )
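# The same benchmark can be driven outside a test; a minimal sketch mirroring
# the arguments used above (actually executing it downloads a tiny model):
if __name__ == "__main__" and is_torch_available():
    bench_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(bench_args).run())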
| 143 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Dict = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ : List[Any] = {
'''google/rembert''': 2_56,
}
lowerCAmelCase__ : List[str] = '''▁'''
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = RemBertTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="[CLS]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<unk>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<pad>" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , **__UpperCamelCase , ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = do_lower_case
snake_case__ : Any = remove_space
snake_case__ : List[Any] = keep_accents
snake_case__ : Dict = vocab_file
snake_case__ : int = False if not self.vocab_file else True
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : Dict = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(__UpperCamelCase ) )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
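# Illustrative sketch of the token_type_ids layout built above for a
# sentence pair: zeros cover [CLS] A [SEP], ones cover B [SEP].
cls, sep = ["[CLS]"], ["[SEP]"]
seq_a, seq_b = ["hello", "world"], ["hi"]
token_type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]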
| 143 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A_ : Optional[int] = deprecated_arg[3:]
setattr(self , _snake_case , not kwargs.pop(_snake_case ) )
logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
A_ : List[Any] = kwargs.pop('''torchscript''' , self.torchscript )
A_ : Dict = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
A_ : List[str] = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**_snake_case )
snake_case = field(default=UpperCamelCase , metadata={"help": "Trace the models using torchscript"} )
snake_case = field(default=UpperCamelCase , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
snake_case = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def _snake_case ( self )->Tuple["torch.device", int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
A_ : Tuple = torch.device('''cpu''' )
A_ : Optional[int] = 0
elif is_torch_tpu_available():
A_ : Any = xm.xla_device()
A_ : int = 0
else:
A_ : int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
A_ : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def _snake_case ( self )->int:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def _snake_case ( self )->int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _snake_case ( self )->"torch.device":
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _snake_case ( self )->Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return self.n_gpu > 0
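# Outside the TPU branch, the device-selection logic above reduces to this
# fallback chain (sketch, plain PyTorch only):
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
print(device, n_gpu)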
| 361 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        msg = f'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
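# Quick correctness check of the sieve against naive trial division
# (cheap for small n); relies on prime_sieve defined above.
assert prime_sieve(30) == [n for n in range(2, 31) if all(n % d for d in range(2, n))]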
| 65 | 0 |
_UpperCamelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_UpperCamelCase = [{"type": "code", "content": INSTALL_CONTENT}]
_UpperCamelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 275 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_UpperCamelCase = random.Random()
def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
if rng is None:
__lowerCAmelCase : Any = global_rng
__lowerCAmelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : str = min_seq_length
__lowerCAmelCase : int = max_seq_length
__lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : Any = padding_value
__lowerCAmelCase : str = sampling_rate
__lowerCAmelCase : Optional[Any] = return_attention_mask
__lowerCAmelCase : Optional[Any] = do_normalize
__lowerCAmelCase : Optional[Any] = feature_size
__lowerCAmelCase : Optional[int] = chunk_length
__lowerCAmelCase : Optional[Any] = hop_length
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]:
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ )
__lowerCAmelCase : Dict = feat_extract_first.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__lowerCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
__lowerCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
__lowerCAmelCase : List[str] = feat_extract_first.to_dict()
__lowerCAmelCase : Tuple = feat_extract_second.to_dict()
__lowerCAmelCase : Any = feat_extract_first.mel_filters
__lowerCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(A_ )
__lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
__lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
__lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
import torch
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
__lowerCAmelCase : int = self._load_datasamples(1 )
__lowerCAmelCase : Any = WhisperFeatureExtractor()
__lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = self._load_datasamples(1 )[0]
__lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
__lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
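# The zero_mean_unit_var_norm assertions above boil down to this numpy
# sketch (the small epsilon mirrors a typical numerical-stability guard):
import numpy as np

x = np.random.rand(1000).astype(np.float32) * 65535.0
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
print(abs(normed.mean()) < 1e-3, abs(normed.var() - 1) < 1e-3)  # True True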
| 275 | 1 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
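# Example invocation via python-fire (file name and paths are illustrative,
# not from the original):
#
#   python convert_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# which is equivalent to calling the function directly:
#
#   convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")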
| 332 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
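# Sketch of the global_attention_mask padding rule implemented above: pad
# with -1 (meaning "local attention"), never with 0, on the padding side.
global_mask = [1, 0, 0]  # 1 = global attention on the first token
difference = 2           # positions added by padding
print(global_mask + [-1] * difference)  # right padding: [1, 0, 0, -1, -1]
print([-1] * difference + global_mask)  # left padding: [-1, -1, 1, 0, 0]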
| 332 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A : int = object()
# For specifying empty leaf dict `{}`
A : Dict = object()
def _match(qs, ks):
    """Return True if the regexes in qs match a consecutive window of ks."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Map every flattened parameter key to a PartitionSpec via the rules above."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
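# Usage sketch of the rule matching above: a rule is a tuple of regexes that
# must line up with a consecutive window of a flattened parameter key.
print(_match(("attention", "out_proj", "kernel"),
             ("transformer", "h", "0", "attention", "out_proj", "kernel")))  # True
print(_match(("mlp", "c_fc", "bias"),
             ("transformer", "h", "0", "attention", "out_proj", "kernel")))  # False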
| 57 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
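# Quick-start sketch (added, illustrative): the same benchmark API can be driven
# outside the test suite; the model id and sizes below are arbitrary examples.
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8, 32], batch_sizes=[1, 2], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)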
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
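# Usage sketch (added, illustrative): instantiating the configuration; the model
# class assumes the matching transformers LiLT implementation is available.
#
#   config = LiltConfig()                 # defaults as above
#   config.channel_shrink_ratio           # -> 4
#   from transformers import LiltModel
#   model = LiltModel(config)             # randomly initialized model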
| 352 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function of ``n``: 1 for a square-free n with an even number of
    prime factors, -1 for an odd number, and 0 if n is not square-free."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
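# Examples (added): mobius(10) == 1 (10 = 2 * 5, an even number of prime factors),
# mobius(30) == -1 (2 * 3 * 5, odd count), and mobius(12) == 0, since
# prime_factors(12) == [2, 2, 3] fails the square-free check.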
if __name__ == "__main__":
import doctest
doctest.testmod()
| 125 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
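# Note (added): Kandinsky 2.2 is a two-stage system -- the prior pipeline maps the
# text prompt to image embeddings, and the img2img decoder pipeline consumes those
# embeddings together with the init image, which is exactly the flow the slow test
# above exercises.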
| 17 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
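# Note (added): with ``partitions: 4`` and ``processes_per_host: 8`` above, each
# ml.p3dn.24xlarge host runs 8 workers and the model is split into 4 pipeline
# partitions by SageMaker's model-parallel runtime, with ``ddp: True`` layering
# data parallelism on top.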
| 17 | 1 |
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
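# Sanity check (added): this is Project Euler problem 48, whose published answer
# is 9110846700 -- the value ``solution()`` returns.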
if __name__ == "__main__":
print(solution())
| 371 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
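# Note (added): this is the standard transformers lazy-import pattern -- under
# TYPE_CHECKING the real symbols are imported eagerly for static analysis, while
# at runtime the module object is swapped for a ``_LazyModule`` that resolves
# entries of ``_import_structure`` on first attribute access; the tokenizer
# aliases travel via ``extra_objects`` because they are plain aliases of the T5
# tokenizers rather than mt5-local modules.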
| 13 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original MAE checkpoint key to its transformers ViTMAE name."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused qkv projections into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 331 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
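# Rendering note (added): a Manim scene like the one above is typically rendered
# from the command line, e.g. ``manim -pql <this_file>.py <SceneClassName>``; the
# exact file and scene names here are placeholders.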
| 323 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BERT sequence-generation checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Tokenize a string into sentencepiece tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 357 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one table-of-content section by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
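# Usage note (added, illustrative): this checker is normally run from the repo
# root, e.g. ``python utils/check_doc_toc.py`` to verify the ToC or with
# ``--fix_and_overwrite`` to rewrite docs/source/en/_toctree.yml in place; the
# exact script path is an assumption here.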
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 219 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
# pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
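# Note (added): ``patch_submodule`` is exercised above both as a context manager
# and via explicit ``start()``/``stop()`` calls; the "__test_..._mock__" strings
# are arbitrary sentinel values, used only so the assertions can recognize the
# patched attribute.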
| 181 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
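

# Hedged, standalone sketch (not part of the original test file): reproduces by
# hand the tiny-model shape check the tester above performs. The class names
# used here are the public transformers API (DebertaV2Config / DebertaV2Model);
# guarded so it never runs under a test collector.
if __name__ == "__main__":
    import torch
    from transformers import DebertaV2Config, DebertaV2Model

    demo_config = DebertaV2Config(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    demo_model = DebertaV2Model(demo_config).eval()
    demo_input_ids = torch.randint(0, demo_config.vocab_size, (2, 7))
    with torch.no_grad():
        demo_hidden = demo_model(demo_input_ids)[0]
    # last_hidden_state keeps the (batch, sequence, hidden) layout the tester asserts
    assert demo_hidden.shape == (2, 7, demo_config.hidden_size)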
| 181 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["torch"]
def __init__(self : Any , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : List[Any] , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Tuple , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : List[Any] , *snake_case_ : List[str] , **snake_case_ : int ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : List[str] , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["torch"]
def __init__(self : Dict , *snake_case_ : int , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : Dict , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : int , *snake_case_ : int , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : Optional[int] , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Optional[Any] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : str , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Union[str, Any] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : List[str] , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["torch"]
def __init__(self : Tuple , *snake_case_ : Dict , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : List[str] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : Optional[Any] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["torch"]
def __init__(self : Dict , *snake_case_ : Optional[int] , **snake_case_ : Optional[int] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Dict , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["torch"]
def __init__(self : List[Any] , *snake_case_ : Optional[int] , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : Optional[Any] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : Dict , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["torch"]
def __init__(self : int , *snake_case_ : Optional[Any] , **snake_case_ : int ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : Tuple , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : List[str] , *snake_case_ : List[Any] , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Dict , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Any , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : List[Any] , *snake_case_ : str , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : int , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : str , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : str , **lowerCAmelCase__ : List[str] ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Dict ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : List[str] ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : str ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Optional[int] ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[int] ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
def __UpperCamelCase ( *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Optional[Any] ):
requires_backends(lowerCAmelCase__ , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = ["torch"]
def __init__(self : Optional[Any] , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : str , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Optional[int] , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : str , *snake_case_ : Optional[Any] , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Optional[int] , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Dict , **snake_case_ : int ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : Dict , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : Any , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Dict = ["torch"]
def __init__(self : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : Any ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : List[Any] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : str , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["torch"]
def __init__(self : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : List[str] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Optional[int] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Optional[int] , **snake_case_ : Any ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : Any , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : str , *snake_case_ : str , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Dict , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : Tuple , *snake_case_ : int , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : int , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : Optional[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Any ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : int , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : Optional[int] , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Dict , **snake_case_ : Optional[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : str , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : Optional[Any] , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : str , *snake_case_ : Optional[int] , **snake_case_ : str ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Any , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : int , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["torch"]
def __init__(self : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : Any ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : List[Any] , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : List[str] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = ["torch"]
def __init__(self : List[str] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[int] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : List[Any] , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Optional[Any] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Dict = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Optional[int] , **snake_case_ : Optional[int] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : Optional[int] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["torch"]
def __init__(self : str , *snake_case_ : Any , **snake_case_ : List[str] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : int , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : Optional[int] , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : int , *snake_case_ : List[str] , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : List[Any] , *snake_case_ : Tuple , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : Any , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Tuple , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : str , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : int , *snake_case_ : Dict , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : int , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : Any , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : Optional[int] , *snake_case_ : str , **snake_case_ : List[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : Any , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = ["torch"]
def __init__(self : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : int , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Union[str, Any] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = ["torch"]
def __init__(self : Any , *snake_case_ : Any , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : List[str] , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Dict = ["torch"]
def __init__(self : List[Any] , *snake_case_ : str , **snake_case_ : int ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : Tuple , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : str , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : str , *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Dict , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : Optional[int] , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : Any , *snake_case_ : int , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Optional[int] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : int , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : List[Any] , *snake_case_ : str , **snake_case_ : Optional[Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : Optional[int] , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = ["torch"]
def __init__(self : Dict , *snake_case_ : Tuple , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : Union[str, Any] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Tuple , **snake_case_ : Dict ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Any , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Optional[int] , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["torch"]
def __init__(self : int , *snake_case_ : Any , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : Any , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : str , *snake_case_ : List[str] , **snake_case_ : Union[str, Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : List[Any] , *snake_case_ : Any , **snake_case_ : List[str] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : List[str] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = ["torch"]
def __init__(self : Tuple , *snake_case_ : List[Any] , **snake_case_ : str ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Tuple , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[str] , *snake_case_ : str , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = ["torch"]
def __init__(self : Dict , *snake_case_ : List[Any] , **snake_case_ : Union[str, Any] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : int , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : Dict , **snake_case_ : int ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : List[str] , *snake_case_ : Any , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Dict , *snake_case_ : List[str] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : Any , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["torch"]
def __init__(self : Optional[int] , *snake_case_ : List[str] , **snake_case_ : Tuple ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : str , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Tuple , *snake_case_ : List[str] , **snake_case_ : Any ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Dict , **snake_case_ : Optional[int] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : Any , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : str , **snake_case_ : Optional[Any] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["torch"]
def __init__(self : Optional[int] , *snake_case_ : List[str] , **snake_case_ : Dict ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Any , *snake_case_ : List[str] , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : List[str] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Dict = ["torch"]
def __init__(self : Tuple , *snake_case_ : str , **snake_case_ : str ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[Any] , *snake_case_ : int , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Union[str, Any] , *snake_case_ : Optional[Any] , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : str = ["torch"]
def __init__(self : int , *snake_case_ : Tuple , **snake_case_ : Any ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : int , *snake_case_ : int , **snake_case_ : Optional[int] ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : List[str] , **snake_case_ : str ):
requires_backends(cls , ['''torch'''] )
class UpperCamelCase__ ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["torch"]
def __init__(self : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : List[str] ):
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : Optional[int] , *snake_case_ : str , **snake_case_ : Tuple ):
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase (cls : List[Any] , *snake_case_ : Dict , **snake_case_ : List[Any] ):
requires_backends(cls , ['''torch'''] )
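

# Context for the classes above (hedged sketch, not code from this module):
# transformers' DummyObject is a metaclass whose attribute hook routes every
# access through requires_backends, so importing the library without torch
# succeeds while *using* any torch-backed class raises a clear ImportError.
# A minimal, simplified rendition of the idea (the real DummyObject also
# exempts private/dunder attributes):
class _DummyBackendGuard(type):
    def __getattr__(cls, key):
        # Any attribute not found normally ends up here and fails loudly.
        raise ImportError(
            f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}"
        )


class _FakeTorchModel(metaclass=_DummyBackendGuard):
    _backends = ["torch"]


# _FakeTorchModel.from_pretrained("x")  # -> ImportError mentioning "torch"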
| 90 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
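

# Hedged usage sketch (not part of the scheduler implementation above): a
# bare-bones ancestral-sampling loop with this scheduler. `apply_model` is a
# stand-in for a real noise-prediction network (e.g. a Flax UNet) and is only
# defined here so the sketch runs end to end.
if __name__ == "__main__":
    def apply_model(x, t):
        # Stand-in for a real noise-prediction network; returns zero "noise".
        return jnp.zeros_like(x)

    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)

    key = jax.random.PRNGKey(0)
    key, init_key = jax.random.split(key)
    sample = jax.random.normal(init_key, (1, 64, 64, 3)) * state.init_noise_sigma

    for t in state.timesteps:
        model_output = apply_model(sample, t)
        key, step_key = jax.random.split(key)
        sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)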
| 90 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
# fmt: off
__A = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",)
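

# Hedged demo (not part of the original file): the [CLS] A [SEP] B [SEP] layout
# asserted in test_sequence_builders, shown against the public albert-base-v2
# checkpoint (network access assumed; guarded so it never runs under pytest).
if __name__ == "__main__":
    demo_tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    demo_enc = demo_tok("sequence builders", "multi-sequence build")
    assert demo_enc["input_ids"][0] == demo_tok.cls_token_id
    assert demo_enc["input_ids"].count(demo_tok.sep_token_id) == 2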
| 15 |
def heaps(arr: list) -> list:
    """
    Return all permutations of ``arr`` using Heap's algorithm, which produces
    each successive permutation from the previous one by a single swap.
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
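

# Optional sanity check (illustrative addition, not in the original script):
# Heap's algorithm must emit exactly n! permutations and agree with
# itertools.permutations up to ordering.
def _verify_heaps(sample: list) -> None:
    import math
    from itertools import permutations

    result = heaps(list(sample))  # copy, since heaps mutates its argument
    assert len(result) == math.factorial(len(sample))
    assert sorted(result) == sorted(permutations(sample))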
| 15 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                seq_0 = "Encode this."
                seq_1 = "This one too please."
                encoded_sequence = tokenizer.encode(seq_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(seq_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    seq_0, seq_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 74 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCamelCase : int = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCamelCase : Dict = F'''https://www.google.com/search?q={query}&num=100'''
__UpperCamelCase : Tuple = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__UpperCamelCase : Tuple = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__UpperCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 74 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 253 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
lowerCAmelCase : Dict = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
lowerCAmelCase : Dict = {
'allenai/longformer-base-4096': 40_96,
'allenai/longformer-large-4096': 40_96,
'allenai/longformer-large-4096-finetuned-triviaqa': 40_96,
'allenai/longformer-base-4096-extra.pos.embd.only': 40_96,
'allenai/longformer-large-4096-extra.pos.embd.only': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : List[str] = bs[:]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : int = [chr(a ) for n in cs]
return dict(zip(a , a ) )
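# A minimal usage sketch of the byte<->unicode map built above (illustrative,
# values follow directly from the loop logic):
#   byte_encoder = A_()
#   byte_encoder[ord("!")] == "!"   # printable bytes keep their own character
#   byte_encoder[0] == chr(256)     # control bytes are shifted past U+00FF
# The mapping is reversible, which lets the tokenizer round-trip arbitrary bytes.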
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = set()
SCREAMING_SNAKE_CASE_ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : Any = char
return pairs
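# Illustrative sketch of the pair extraction above: for the symbol tuple
#   ("h", "e", "l", "l", "o")
# the function returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.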
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token
SCREAMING_SNAKE_CASE_ : int = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_ : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : List[str] = json.load(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : int = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ : Optional[int] = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : Tuple = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : int = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = bigram
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
SCREAMING_SNAKE_CASE_ : Any = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : Tuple = j
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : str = tuple(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Any = get_pairs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = ' '.join(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = word
return word
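# The loop above is standard BPE: repeatedly pick the adjacent pair with the
# lowest merge rank and fuse it until no ranked pair remains. A hand-worked
# sketch, assuming a single learned merge ("l", "o"):
#   "low" -> ("l", "o", "w") -> ("lo", "w") -> "lo w"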
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
for token in re.findall(self.pat , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) )
return bpe_tokens
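# Tokenization above is a three-step pipeline: regex pre-tokenization (self.pat),
# byte-to-unicode encoding of each chunk, then BPE splitting of the encoded text.
# Roughly, " hello" becomes "Ġhello" before BPE, where "Ġ" (chr(288)) is the
# byte-encoded leading space; the final pieces depend on the learned merges.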
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ : List[Any] = token_index
writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
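# Illustrative mask shapes: for a single 3-token sequence the result is
# [1, 0, 0, 0, 1] (the 1s mark <s> and </s>); for a pair, the two middle 1s
# mark the doubled separator between the sequences.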
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : List[Any] = ' ' + text
return (text, kwargs)
| 253 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = StableDiffusionXLImgaImgPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCamelCase_: Dict = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_: str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
UpperCamelCase_: Tuple = CLIPTextModel(__lowerCamelCase )
UpperCamelCase_: str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
UpperCamelCase_: List[str] = CLIPTextModelWithProjection(__lowerCamelCase )
UpperCamelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
UpperCamelCase_: int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCAmelCase__ ( self : Any , snake_case_ : int , snake_case_ : Tuple=0 ):
UpperCamelCase_: str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCamelCase_: Dict = image / 2 + 0.5
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase_: Tuple = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase_: Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase_: Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_: str = self.get_dummy_components()
UpperCamelCase_: Any = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
UpperCamelCase_: Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase_: List[str] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase_: List[Any] = sd_pipe(**__lowerCamelCase ).images
UpperCamelCase_: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase_: Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Dict ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self : int ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self : List[str] ):
pass
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = self.get_dummy_components()
UpperCamelCase_: Tuple = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
UpperCamelCase_: int = sd_pipe.to(__lowerCamelCase )
UpperCamelCase_: Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
# forward without prompt embeds
UpperCamelCase_: Dict = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase_: Union[str, Any] = 3 * ["""this is a negative prompt"""]
UpperCamelCase_: int = negative_prompt
UpperCamelCase_: int = 3 * [inputs["""prompt"""]]
UpperCamelCase_: List[Any] = sd_pipe(**__lowerCamelCase )
UpperCamelCase_: List[str] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase_: List[Any] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase_: Optional[Any] = 3 * ["""this is a negative prompt"""]
UpperCamelCase_: str = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = sd_pipe.encode_prompt(__lowerCamelCase , negative_prompt=__lowerCamelCase )
UpperCamelCase_: Dict = sd_pipe(
**__lowerCamelCase , prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , pooled_prompt_embeds=__lowerCamelCase , negative_pooled_prompt_embeds=__lowerCamelCase , )
UpperCamelCase_: int = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : Tuple="cpu" , snake_case_ : Optional[Any]=torch.floataa , snake_case_ : Optional[int]=0 ):
UpperCamelCase_: Any = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase_: Any = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
UpperCamelCase_: Optional[int] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
UpperCamelCase_: Dict = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase_: Optional[int] = self.get_inputs(__lowerCamelCase )
UpperCamelCase_: Optional[Any] = pipe(**__lowerCamelCase ).images
UpperCamelCase_: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_: List[str] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 367 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase_ : Any = 5_00_00
lowerCamelCase_ : Any = 50_00
lowerCamelCase_ , lowerCamelCase_ : Tuple = os.path.split(__file__)
lowerCamelCase_ : int = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
for i in range(lowerCamelCase ):
UpperCamelCase_: Dict = dataset[i]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
for i in range(0 , len(lowerCamelCase ) , lowerCamelCase ):
UpperCamelCase_: List[Any] = dataset[i : i + batch_size]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
with dataset.formatted_as(type=lowerCamelCase ):
for i in range(lowerCamelCase ):
UpperCamelCase_: List[str] = dataset[i]
@get_duration
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
with dataset.formatted_as(type=lowerCamelCase ):
for i in range(0 , lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = dataset[i : i + batch_size]
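# The four decorated readers above cover the access patterns that matter for an
# Arrow-backed dataset: row-by-row vs. sliced-batch reads, each in the default
# format and again under a formatted_as() type (numpy/pandas/torch/tensorflow).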
def A__ ( ) -> Tuple:
UpperCamelCase_: int = {"""num examples""": SPEED_TEST_N_EXAMPLES}
UpperCamelCase_: Union[str, Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
UpperCamelCase_: Tuple = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCamelCase_: int = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCamelCase_: Optional[int] = generate_example_dataset(
os.path.join(lowerCamelCase , """dataset.arrow""" ) , lowerCamelCase , num_examples=lowerCamelCase , seq_shapes={"""list""": (1_00,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCamelCase ) )
UpperCamelCase_: List[Any] = func(lowerCamelCase , **lowerCamelCase )
print("""shuffling dataset""" )
UpperCamelCase_: Dict = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(lowerCamelCase ) )
UpperCamelCase_: List[Any] = func(
lowerCamelCase , **lowerCamelCase )
with open(lowerCamelCase , """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 223 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : Optional[int] = '''▁'''
lowercase : Tuple = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
lowercase : Any = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
lowercase : Optional[int] = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
lowercase : str = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
lowercase : Tuple = {'''mustc''': MUSTC_LANGS}
class __UpperCAmelCase ( lowerCamelCase_ ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = MAX_MODEL_INPUT_SIZES
__lowercase = ["""input_ids""", """attention_mask"""]
__lowercase = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , do_upper_case=_lowercase , do_lower_case=_lowercase , tgt_lang=_lowercase , lang_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
_snake_case = do_upper_case
_snake_case = do_lower_case
_snake_case = load_json(_lowercase )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = spm_file
_snake_case = load_spm(_lowercase , self.sp_model_kwargs )
if lang_codes is not None:
_snake_case = lang_codes
_snake_case = LANGUAGES[lang_codes]
_snake_case = [F'<lang:{lang}>' for lang in self.langs]
_snake_case = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
_snake_case = self.lang_tokens
_snake_case = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_snake_case = {}
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return len(self.encoder )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowercase )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.lang_code_to_id[tgt_lang]
_snake_case = [lang_code_id]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.encoder.get(_lowercase , self.encoder[self.unk_token] )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.decoder.get(_lowercase , self.unk_token )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = []
_snake_case = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_snake_case = self.sp_model.decode(_lowercase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_snake_case = []
else:
current_sub_tokens.append(_lowercase )
_snake_case = self.sp_model.decode(_lowercase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
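# Illustrative shape of the output: with tgt_lang="fr" the prefix is the id of
# <lang:fr>, so a single sequence becomes [<lang:fr>] + token_ids + [eos], and a
# pair is concatenated before the one trailing eos, as the comment above notes.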
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
_snake_case = [1] * len(self.prefix_tokens )
_snake_case = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowercase )) + suffix_ones
return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case = {}
_snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = Path(_lowercase )
assert save_dir.is_dir(), F'{save_directory} should be a directory'
_snake_case = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
_snake_case = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowercase )
elif not os.path.isfile(self.spm_file ):
with open(_lowercase , 'wb' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (str(_lowercase ), str(_lowercase ))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> sentencepiece.SentencePieceProcessor:
_snake_case = sentencepiece.SentencePieceProcessor(**__UpperCamelCase )
spm.Load(str(__UpperCamelCase ) )
return spm
def SCREAMING_SNAKE_CASE__ ( __A ) -> Union[Dict, List]:
with open(__UpperCamelCase , 'r' ) as f:
return json.load(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> None:
with open(__UpperCamelCase , 'w' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase , indent=2 )
| 42 | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__lowerCamelCase : Union[str, Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 219 | 0 |
import datasets
lowercase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
lowercase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
lowercase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
return (preds == labels).mean()
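# Minimal sketch (assumes numpy arrays, as the elementwise == and .mean() imply):
#   preds = np.array([0, 1, 1]); labels = np.array([0, 0, 1])
#   (preds == labels).mean() == 2 / 3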
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Tuple , a : List[str] )-> List[str]:
"""simple docstring"""
return {"accuracy": simple_accuracy(a , a )}
| 269 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
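# The helper above is the usual weight-tying trick: build a bias-free nn.Linear
# shaped like the embedding matrix and (in the upstream helper) point its
# .weight.data at the embedding weights, so output logits reuse the same table.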
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="facebook/mbart-large-en-ro" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> str:
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
lowercase__ = state_dict['encoder.embed_tokens.weight'].shape[0]
lowercase__ = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE )
if mbart_aa and finetuned:
lowercase__ = 'relu'
lowercase__ = state_dict['decoder.embed_tokens.weight']
lowercase__ = MBartForConditionalGeneration(_SCREAMING_SNAKE_CASE )
model.model.load_state_dict(_SCREAMING_SNAKE_CASE )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase_ = parser.parse_args()
lowercase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 269 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@property
def _lowerCamelCase ( self :Any ) -> Tuple:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self :Optional[int] ) -> Dict:
__UpperCamelCase : int = ort.SessionOptions()
__UpperCamelCase : int = False
return options
def _lowerCamelCase ( self :Any ) -> List[str]:
__UpperCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
__UpperCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
__UpperCamelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
__UpperCamelCase : Tuple = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : List[str] = "A red cat sitting on a park bench"
__UpperCamelCase : List[Any] = np.random.RandomState(0 )
__UpperCamelCase : Union[str, Any] = pipe(
prompt=a , image=a , mask_image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=a , output_type="np" , )
__UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-2 | 232 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase : Optional[int] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase : Optional[Any] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowercase : str = 'zero2'
lowercase : Optional[int] = 'zero3'
lowercase : Optional[Any] = [ZEROa, ZEROa]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str]) -> int:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return F'{func.__name__}_{param_based_name}'
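# Illustrative: for param.args == ("zero2", "base") and a test named test_fp32,
# this name_func yields something like "test_fp32_zero2_base" after
# to_safe_name sanitization.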
# Cartesian-product of zero stages with models to test
lowercase : List[str] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :Dict , a :Optional[Any] , a :str ) -> Optional[int]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[str] , a :str , a :str ) -> List[Any]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[Any] , a :List[str] , a :int ) -> Optional[int]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a )
def _lowerCamelCase ( self :List[str] , a :List[Any] , a :Dict ) -> int:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
def _lowerCamelCase ( self :Any , a :List[str] ) -> Optional[Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def _lowerCamelCase ( self :Optional[Any] , a :str , a :str , a :int = 1_0 , a :bool = True , a :bool = True , a :bool = True , ) -> Any:
__UpperCamelCase : Optional[Any] = models[model]
__UpperCamelCase : List[Any] = self.run_trainer(
stage=a , model_name=a , eval_steps=a , num_train_epochs=1 , distributed=a , fpaa=a , )
self.do_checks(a )
return output_dir
def _lowerCamelCase ( self :List[str] , a :str , a :str , a :int = 1_0 , a :int = 1 , a :bool = True , a :bool = True , ) -> Dict:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir("./xxx" , after=a )
__UpperCamelCase : int = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(a )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__UpperCamelCase : Dict = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__UpperCamelCase : int = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__UpperCamelCase : Optional[Any] = self.get_launcher(a )
__UpperCamelCase : Optional[int] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a , env=self.get_env() )
return output_dir
def _lowerCamelCase ( self :Any , a :List[Any]=False ) -> List[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
__UpperCamelCase : List[Any] = min(2 , get_gpu_count() ) if distributed else 1
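# e.g. a distributed run on a 2-GPU box yields (illustrative):
#   ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"]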
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split() | 232 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
SCREAMING_SNAKE_CASE_ : Optional[int] = pytest.mark.integration
@require_faiss
class a ( _lowerCamelCase ):
"""simple docstring"""
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(UpperCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
A__ = dset.map(
lambda UpperCamelCase , UpperCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCamelCase , keep_in_memory=UpperCamelCase )
A__ = dset.add_faiss_index("""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
A__ , A__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
A__ , A__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
A__ , A__ = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(UpperCamelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase ( self: str ):
"""simple docstring"""
from elasticsearch import Elasticsearch
A__ = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A__ = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
A__ = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=UpperCamelCase )
A__ , A__ = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class a ( _lowerCamelCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(UpperCamelCase )
self.assertRaises(UpperCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
A__ = np.eye(5 , dtype=np.floataa )[::-1]
A__ , A__ = index.search_batch(UpperCamelCase )
self.assertRaises(UpperCamelCase , index.search_batch , queries[0] )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
import faiss
A__ = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
A__ = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCamelCase ):
A__ = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase ( self: int ):
"""simple docstring"""
import faiss
A__ = faiss.IndexFlat(5 )
A__ = FaissIndex(custom_index=UpperCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase ( self: str ):
"""simple docstring"""
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase ) as tmp_file:
index.save(tmp_file.name )
A__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(UpperCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _snake_case ( UpperCAmelCase_ : Dict ):
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
A__ = """index.faiss"""
A__ = F"""mock://{index_name}"""
index.save(UpperCAmelCase_ , storage_options=mockfs.storage_options )
A__ = FaissIndex.load(UpperCAmelCase_ , storage_options=mockfs.storage_options )
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(UpperCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( _lowerCamelCase ):
"""simple docstring"""
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 69 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet
    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """simple docstring"""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """simple docstring"""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
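# Usage sketch (illustrative only — the checkpoint name below is an assumption, not
# something this file pins down; any CLAP-style zero-shot audio checkpoint works):
#
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # `preds` is a list of {"score": ..., "label": ...} dicts sorted by descending score,
#   # exactly the structure built in `postprocess` above.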
| 69 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
_a = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e"
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
# en_XX, A, test, EOS
'''input_ids''': [[128_022, 58, 4_183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128_006,
} , )
| 211 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad every sequence in `sequences` to `sequence_length` with `padding_value`."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
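# Quick sanity check for `padding_tensor` (values illustrative, assuming the
# right-padding reconstruction above):
#
#   >>> padding_tensor([[1, 2], [3]], -1, "right", 4)
#   [[1, 2, -1, -1], [3, -1, -1, -1]]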
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 211 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
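# Example usage (doctest-style; the values are the start indices of each matched
# keyword, matching the Aho–Corasick behaviour of the class above):
#
#   >>> auto = Automaton(["what", "hat", "ver", "er"])
#   >>> auto.search_in("whatever, err ... , wherever")
#   {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}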
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 324 | 1 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro–Winkler similarity between two strings.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):  # noqa: E741
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
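# Worked examples of the formula above (jaro = mean of the three match ratios,
# plus the Winkler prefix bonus 0.1 * prefix_len * (1 - jaro)):
#
#   >>> jaro_winkler("martha", "marhta")   # 6 matches, 1 transposition, prefix "mar"
#   0.9611111111111111
#   >>> jaro_winkler("hello", "world")     # single match "l", no common prefix
#   0.4666666666666666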
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 132 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 132 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
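# Shape sketch (derived from the defaults above, illustrative only): with
# condition_lengths = [77, 257], `encoder_hidden_states` is expected to be of shape
# (batch_size, 77 + 257, num_features) — the first 77 tokens for one condition, the
# next 257 for the other — and transformer_index_for_condition = [1, 0] routes the
# first slice through transformers[1] and the second through transformers[0].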
| 358 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two signals, computed via a circulant matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
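# Example (uses the default signals set in __init__; circular convolution of
# [2, 1, 2, -1] with [1, 2, 3, 4]):
#
#   >>> convolution = CircularConvolution()
#   >>> convolution.circular_convolution()
#   [10, 10, 6, 14]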
if __name__ == "__main__":
doctest.testmod()
| 244 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 330 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 330 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 5_12,
"google/electra-base-generator": 5_12,
"google/electra-large-generator": 5_12,
"google/electra-small-discriminator": 5_12,
"google/electra-base-discriminator": 5_12,
"google/electra-large-discriminator": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 202 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
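# Sanity check for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), functools.reduce(operator.mul, ...) gives 5 * 2**6 = 320,
# i.e. the feature encoder emits one frame per 320 raw audio samples.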
| 5 |
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find a shortest path between `start` and `goal` nodes using BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
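# Note: `dist` is seeded with {target: -1}, so a target that the BFS never reaches
# keeps distance -1, and unknown nodes are rejected by the guard up front, e.g.
#
#   bfs_shortest_path_distance(demo_graph, "G", "Z")  # -> -1 ("Z" is not in the graph)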
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
| 76 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
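# Hedged CLI sketch (the script filename is an assumption; the paths are just the
# argparse defaults declared below):
#
#   python prepare_chinese_ref.py \
#       --file_name=./resources/chinese-demo.txt \
#       --ltp=./resources/ltp \
#       --bert=./resources/robert \
#       --save_path=./resources/ref.txt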
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
__lowerCAmelCase : List[Any] =parser.parse_args()
main(args)
| 123 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 123 | 1 |
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
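# Doctest-style examples (classic max-product-subarray cases; the negative/zero case
# shows why both a running maximum and minimum are tracked above):
#
#   >>> max_product_subarray([2, 3, -2, 4])
#   6
#   >>> max_product_subarray([-2, 0, -1])
#   0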
| 53 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(UpperCAmelCase__ )}'''
| 239 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case ( unittest.TestCase):
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
_A = tempfile.mkdtemp()
# fmt: off
_A = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_A = dict(zip(a__ , range(len(a__ ) ) ) )
_A = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
_A = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_A = os.path.join(self.tmpdirname , a__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a__ , a__ )
def a_ ( self : int , **a__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : str , **a__ : List[str] ) -> int:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : Tuple , **a__ : Any ) -> int:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : str ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def a_ ( self : str ) -> int:
'''simple docstring'''
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
_A = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = self.get_image_processor()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_slow.save_pretrained(self.tmpdirname )
_A = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=a__ )
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_fast.save_pretrained(self.tmpdirname )
_A = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a__ )
self.assertIsInstance(processor_fast.tokenizer , a__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a__ )
self.assertIsInstance(processor_fast.image_processor , a__ )
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
_A = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_A = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
_A = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def a_ ( self : int ) -> Tuple:
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
_A = self.prepare_image_inputs()
_A = image_processor(a__ , return_tensors="np" )
_A = processor(images=a__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
_A = "lower newer"
_A = processor(text=a__ )
_A = tokenizer(a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
_A = "lower newer"
_A = self.prepare_image_inputs()
_A = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(a__ )
_A = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def a_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
_A = "lower newer"
_A = self.prepare_image_inputs()
_A = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 163 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class snake_case :
def __init__( self : Optional[int] , a__ : Tuple , a__ : str=1_00 , a__ : Dict=13 , a__ : Tuple=30 , a__ : str=2 , a__ : List[Any]=3 , a__ : Dict=True , a__ : Optional[Any]=True , a__ : List[Any]=32 , a__ : Tuple=4 , a__ : Tuple=4 , a__ : Optional[int]=37 , a__ : Tuple="gelu" , a__ : Optional[int]=0.1 , a__ : int=0.1 , a__ : Optional[Any]=10 , a__ : Optional[int]=0.0_2 , a__ : Dict=3 , a__ : str=None , a__ : Any=[0, 1, 2, 3] , ) -> Tuple:
'''simple docstring'''
_A = parent
_A = 1_00
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = scope
_A = out_indices
_A = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = num_patches + 1
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def a_ ( self : Any , a__ : List[str] , a__ : Tuple , a__ : List[str] , a__ : str ) -> Any:
'''simple docstring'''
_A = BeitModel(config=a__ )
model.to(a__ )
model.eval()
_A = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : List[str] , a__ : Optional[Any] , a__ : Tuple , a__ : Any , a__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = BeitForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
_A = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def a_ ( self : Optional[Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] , a__ : Dict ) -> Dict:
'''simple docstring'''
_A = self.type_sequence_label_size
_A = BeitForImageClassification(a__ )
model.to(a__ )
model.eval()
_A = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A = 1
_A = BeitForImageClassification(a__ )
model.to(a__ )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self : Optional[Any] , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : Dict ) -> str:
'''simple docstring'''
_A = self.num_labels
_A = BeitForSemanticSegmentation(a__ )
model.to(a__ )
model.eval()
_A = model(a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_A = model(a__ , labels=a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_A = BeitModelTester(self )
_A = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def a_ ( self : int ) -> List[Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__ )
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a__ ), BeitForMaskedImageModeling]:
continue
_A = model_class(a__ )
model.to(a__ )
model.train()
_A = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_A = model(**a__ ).loss
loss.backward()
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_A = False
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_A = model_class(a__ )
model.gradient_checkpointing_enable()
model.to(a__ )
model.train()
_A = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_A = model(**a__ ).loss
loss.backward()
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = _config_zero_init(a__ )
for model_class in self.all_model_classes:
_A = model_class(config=a__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = BeitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).pixel_values.to(a__ )
# prepare bool_masked_pos
_A = torch.ones((1, 1_96) , dtype=torch.bool ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(pixel_values=a__ , bool_masked_pos=a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(a__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a__ , atol=1E-2 ) )
@slow
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(a__ )
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1E-4 ) )
_A = 2_81
self.assertEqual(logits.argmax(-1 ).item() , a__ )
@slow
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
_A = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(a__ )
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1E-4 ) )
_A = 23_96
self.assertEqual(logits.argmax(-1 ).item() , a__ )
@slow
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
_A = model.to(a__ )
_A = BeitImageProcessor(do_resize=a__ , size=6_40 , do_center_crop=a__ )
_A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_A = Image.open(ds[0]["file"] )
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , a__ )
_A = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
_A = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=a__ , )
else:
_A = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=a__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1E-4 ) )
@slow
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
_A = model.to(a__ )
_A = BeitImageProcessor(do_resize=a__ , size=6_40 , do_center_crop=a__ )
_A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_A = Image.open(ds[0]["file"] )
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(5_00, 3_00)] )
_A = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , a__ )
_A = image_processor.post_process_semantic_segmentation(outputs=a__ )
_A = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , a__ ) | 163 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
UpperCAmelCase__ = (KDPMaDiscreteScheduler,)
UpperCAmelCase__ = 10
def SCREAMING_SNAKE_CASE ( self : int , **UpperCAmelCase__ : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A_)
return config
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A_)
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=A_ , beta_end=A_)
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A_)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type='''v_prediction''')
A__ = scheduler_class(**A_)
scheduler.set_timesteps(self.num_inference_steps)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(A_)
for i, t in enumerate(scheduler.timesteps):
A__ = scheduler.scale_model_input(A_ , A_)
A__ = model(A_ , A_)
A__ = scheduler.step(A_ , A_ , A_)
A__ = output.prev_sample
A__ = torch.sum(torch.abs(A_))
A__ = torch.mean(torch.abs(A_))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**A_)
scheduler.set_timesteps(self.num_inference_steps)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(A_)
for i, t in enumerate(scheduler.timesteps):
A__ = scheduler.scale_model_input(A_ , A_)
A__ = model(A_ , A_)
A__ = scheduler.step(A_ , A_ , A_)
A__ = output.prev_sample
A__ = torch.sum(torch.abs(A_))
A__ = torch.mean(torch.abs(A_))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
def SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**A_)
scheduler.set_timesteps(self.num_inference_steps , device=A_)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(A_) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(A_ , A_)
A__ = model(A_ , A_)
A__ = scheduler.step(A_ , A_ , A_)
A__ = output.prev_sample
A__ = torch.sum(torch.abs(A_))
A__ = torch.mean(torch.abs(A_))
if str(A_).startswith('''cpu'''):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
| 14 |
def solution(pence: int = 200) -> int:
    """Count the ways ``pence`` can be made from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
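# Added worked example (illustrative helper, not from the source): the same
# recurrence number_of_ways[i] += number_of_ways[i - coin] on a tiny coin set.
def _demo_ways(pence, coins):
    ways = [0] * (pence + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]

assert _demo_ways(5, [1, 2, 5]) == 4  # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1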
if __name__ == "__main__":
assert solution(200) == 7_3682
| 275 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray):
    """Convert an RGB image to grayscale using ITU-R BT.601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray):
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray):
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
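# Added sanity check (illustrative): dilating a single set pixel with a 3x3
# cross-shaped structuring element grows it into a plus of five pixels.
_demo_image = np.zeros((3, 3))
_demo_image[1, 1] = 1
_demo_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert dilation(_demo_image, _demo_kernel).sum() == 5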
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''') | 153 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (Lucas-Lehmer test)."""
    if p < 2:
        raise ValueError('''p should not be less than 2!''')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
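# Added note (hedged): the loop computes s_{k+1} = s_k**2 - 2 (mod M_p) from
# s_0 = 4; M_p = 2**p - 1 is prime exactly when s_{p-2} == 0 (mod M_p).
assert lucas_lehmer_test(13)       # 2**13 - 1 = 8191 is a Mersenne prime
assert not lucas_lehmer_test(11)   # 2**11 - 1 = 2047 = 23 * 89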
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11)) | 153 | 1 |
'''simple docstring'''
def merge_sort(collection):
    """Sort by repeatedly pulling the current minimum and maximum out of the collection."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
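# Added usage sketch (illustrative): each pass strips the minimum and maximum,
# so the sorted "start" prefix and reversed "end" suffix meet in the middle.
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]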
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 27 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "umt5"
A_ = ["past_key_values"]
def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
__a : Any = vocab_size
__a : Any = d_model
__a : str = d_kv
__a : Dict = d_ff
__a : Union[str, Any] = num_layers
__a : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Optional[int] = num_heads
__a : Tuple = relative_attention_num_buckets
__a : Optional[Any] = relative_attention_max_distance
__a : Optional[int] = dropout_rate
__a : List[Any] = layer_norm_epsilon
__a : int = initializer_factor
__a : Union[str, Any] = feed_forward_proj
__a : Any = use_cache
__a : List[Any] = self.feed_forward_proj.split('-' )
__a : Dict = act_info[-1]
__a : Dict = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__a : Optional[int] = 'gelu_new'
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.d_model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_heads
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_layers
class __UpperCamelCase ( lowerCAmelCase_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__a : Dict = 'past_encoder_sequence + sequence'
__a : Tuple = {0: 'batch'}
__a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
__a : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 13
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 5E-4
| 27 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('''Building PyTorch model from configuration: {}'''.format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
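# Example invocation (hedged; paths are placeholders, flags as defined above):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/tf_checkpoint \
#     --rembert_config_file /path/to/rembert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin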
| 366 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of the given dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
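# Added note (restating the logic above): Peter wins a game only when his
# total strictly exceeds Colin's, so the slice
# colin_totals_frequencies[min_colin_total:peter_total] counts every Colin
# total from 6 through peter_total - 1.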
if __name__ == "__main__":
print(f'{solution() = }') | 239 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its bytes as a single string of bits."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with an LZW-style lexicon that grows
    by one bit of code width whenever the index reaches a power of two."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(to_write: str, file_path: str) -> None:
    """Write a bit string to a file, padding the final chunk to a full byte."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix (the leading zeros and the first '1') from the bit string."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, decompress it and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(decompressed, destination_path)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
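# Example invocation (hedged; the script and file names are placeholders):
# python lempel_ziv_decompress.py compressed.bin restored.bin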
| 53 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class a_ :
'''simple docstring'''
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=40 , A=2 , A=1 , A=0 , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(A , A , A )
return config, inputs_dict
def snake_case_( self , A , A ) -> int:
_SCREAMING_SNAKE_CASE = TFPegasusModel(config=A ).get_decoder()
_SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""]
_SCREAMING_SNAKE_CASE = input_ids[:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""]
_SCREAMING_SNAKE_CASE = 1
# first forward pass
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , head_mask=A , use_cache=A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A )[0]
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A , A , rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = TFPegasusModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A )
def snake_case_( self ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_sentencepiece
@require_tokenizers
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def snake_case_( self ) -> List[str]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case_( self , **A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.translate_src_text(**A )
assert self.expected_text == generated_words
def snake_case_( self , **A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **A , padding=A , return_tensors="""tf""" )
_SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A , )
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A )
return generated_words
@slow
def snake_case_( self ) -> Any:
self._assert_generated_batch_equal_expected()
| 58 | 0 |
"""simple docstring"""
import argparse
import os
import re
lowercase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(R"""\[([^\]]+)\]""")
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = _re_indent.search(lowercase_ )
return "" if search is None else search.groups()[0]
def _snake_case ( lowercase__ , lowercase__="" , lowercase__=None , lowercase__=None ):
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
_lowerCamelCase : Union[str, Any] = ['\n'.join(lines[:index] )]
else:
_lowerCamelCase : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCamelCase : List[str] = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
_lowerCamelCase : Optional[Any] = [lines[index + 1]]
index += 1
else:
_lowerCamelCase : Optional[int] = []
else:
blocks.append('\n'.join(lowercase_ ) )
_lowerCamelCase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append('\n'.join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _snake_case ( lowercase__ ):
def _inner(lowercase__ ):
return key(lowercase_ ).lower().replace('_' , '' )
return _inner
def _snake_case ( lowercase__ , lowercase__=None ):
# If no key is provided, we use a noop.
def noop(lowercase__ ):
return x
if key is None:
_lowerCamelCase : str = noop
# Constants are all uppercase, they go first.
_lowerCamelCase : Optional[Any] = [obj for obj in objects if key(lowercase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCamelCase : Any = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCamelCase : List[str] = [obj for obj in objects if not key(lowercase_ )[0].isupper()]
_lowerCamelCase : Optional[int] = ignore_underscore(lowercase_ )
return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ )
def _snake_case ( lowercase__ ):
# This inner function sort imports between [ ].
def _replace(lowercase__ ):
_lowerCamelCase : Dict = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
_lowerCamelCase : Optional[int] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCamelCase : Optional[Any] = keys[:-1]
return "[" + ", ".join([f'''\"{k}\"''' for k in sort_objects(lowercase_ )] ) + "]"
_lowerCamelCase : int = import_statement.split('\n' )
if len(lowercase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCamelCase : List[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCamelCase : str = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCamelCase : Optional[Any] = sort_objects(lowercase_ , key=lambda lowercase__ : x[1] )
_lowerCamelCase : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCamelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCamelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCamelCase : int = keys[:-1]
_lowerCamelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'''\"{k}\"''' for k in sort_objects(lowercase_ )] )
return "\n".join(lowercase_ )
else:
# Finally we have to deal with imports fitting on one line
_lowerCamelCase : str = _re_bracket_content.sub(_replace , lowercase_ )
return import_statement
def _snake_case ( lowercase__ , lowercase__=True ):
with open(lowercase_ , encoding='utf-8' ) as f:
_lowerCamelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCamelCase : Union[str, Any] = split_code_in_indented_blocks(
lowercase_ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCamelCase : int = main_blocks[block_idx]
_lowerCamelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
_lowerCamelCase : Union[str, Any] = 0
while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCamelCase : List[Any] = len(lowercase_ )
else:
line_idx += 1
if line_idx >= len(lowercase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCamelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCamelCase : Optional[int] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCamelCase : Optional[int] = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCamelCase : Union[str, Any] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCamelCase : Tuple = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCamelCase : List[str] = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None]
_lowerCamelCase : Dict = [x[0] for x in sorted(lowercase_ , key=lambda lowercase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCamelCase : int = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowercase_ )
count += 1
# And we put our main block back together with its first and last line.
_lowerCamelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase_ ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(lowercase_ ) )
def _snake_case ( lowercase__=True ):
_lowerCamelCase : str = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
_lowerCamelCase : List[Any] = sort_imports(os.path.join(lowercase_ , '__init__.py' ) , check_only=lowercase_ )
if result:
_lowerCamelCase : Optional[int] = [os.path.join(lowercase_ , '__init__.py' )]
if len(lowercase_ ) > 0:
raise ValueError(f'''Would overwrite {len(lowercase_ )} files, run `make style`.''' )
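# Example invocation (hedged; in the transformers repository this script is
# typically run from the repo root, the exact path may differ):
# python utils/custom_init_isort.py               # fix imports in place
# python utils/custom_init_isort.py --check_only  # only report problems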
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowercase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 356 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""

    destination_vertex: int
    weight: int
class _a :
"""simple docstring"""
    def __init__( self: Optional[Any] , size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self: Any , vertex: int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
    def size( self: Tuple ):
'''simple docstring'''
return self._size
    def add_edge( self: List[Any] , from_vertex: int , to_vertex: int , weight: int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self: Union[str, Any] , start_vertex: int , finish_vertex: int ):
'''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
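    # A minimal usage sketch (hedged; `_a` above is the 0/1-weight adjacency list):
    # edge 0 -> 1 costs 0 and edge 1 -> 2 costs 1, so the shortest 0 -> 2 distance is 1.
    demo_graph = _a(3 )
    demo_graph.add_edge(0 , 1 , 0 )
    demo_graph.add_edge(1 , 2 , 1 )
    print(demo_graph.get_shortest_path(0 , 2 ) )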
| 149 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats ( url = "https://www.worldometers.info/coronavirus"):
    soup = BeautifulSoup(requests.get(url).text ,"html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div" ,{"class": "maincounter-number"})
    keys += soup.findAll("span" ,{"class": "panel-title"})
    values += soup.findAll("div" ,{"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"{key}\n{value}\n")
| 149 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowercase__ = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(_SCREAMING_SNAKE_CASE )
lowercase__ = DatasetInfo.from_directory(_SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'dataset_info.json' ) )
def __UpperCamelCase () -> Dict:
lowercase__ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase__ = dataset_info._to_yaml_dict()
assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase__ = yaml.safe_dump(_SCREAMING_SNAKE_CASE )
lowercase__ = yaml.safe_load(_SCREAMING_SNAKE_CASE )
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase () -> str:
lowercase__ = DatasetInfo()
lowercase__ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
dataset_infos_dict.write_to_directory(_SCREAMING_SNAKE_CASE )
lowercase__ = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase__ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase__ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'README.md' ) )
| 269 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """spiece.model"""}
lowercase_ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowercase_ = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , a : Tuple , a : Optional[int]=False , a : str=False , a : str=False , a : Tuple=None , a : Any=None , a : Union[str, Any]=None , a : Union[str, Any]=None , a : Optional[Dict[str, Any]] = None , **a : Optional[int] , )-> None:
"""simple docstring"""
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
lowercase__ = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowercase__ = '<|endoftext|>' if eos_token is None else eos_token
lowercase__ = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowercase__ = unk_token if pad_token is None else pad_token
lowercase__ = eos_token if bos_token is None else bos_token
else:
lowercase__ = '<pad>' if pad_token is None else pad_token
lowercase__ = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
# Used for whitespace normalization in input texts
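        # NOTE: upstream this set holds distinct Unicode space characters
        # (no-break space, thin space, zero-width space, ...); several of them
        # render identically to a plain ASCII space below.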
        # fmt: off
lowercase__ = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowercase__ = re.compile(
f"""[{"".join(map(a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : Any )-> str:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : int , a : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str )-> str:
"""simple docstring"""
lowercase__ = self.non_printing_characters_re.sub('' , a )
# Normalize whitespaces
lowercase__ = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
lowercase__ = unicodedata.normalize('NFC' , a )
return text
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , **a : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = self.preprocess_text(a )
return self.sp_model.encode(a , out_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> int:
"""simple docstring"""
return self.sp_model.PieceToId(a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> str:
"""simple docstring"""
return self.sp_model.IdToPiece(a )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : str )-> str:
"""simple docstring"""
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[str] )-> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = ''
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(a )
lowercase__ = False
out_string += self.sp_model.decode(a )
return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict[str, int]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Union[str, List[str]] , a : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(a , a ):
lowercase__ = self.preprocess_text(a )
lowercase__ = self.sp_model.encode(a )
else:
lowercase__ = [self.preprocess_text(a ) for t in text]
lowercase__ = self.sp_model.encode(a )
if return_tensors is True or return_tensors == "pt":
lowercase__ = torch.tensor(a )
return token_ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[int, List[int]] )-> str:
"""simple docstring"""
return self.sp_model.decode(a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : "Conversation" )-> List[int]:
"""simple docstring"""
lowercase__ = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowercase__ = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(a ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=a )
| 269 | 1 |
from numpy import exp, pi, sqrt
def _UpperCAmelCase ( x , mu = 0.0 , sigma = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
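    # A minimal usage sketch (hedged): at x == mu, a unit Gaussian has density
    # 1 / sqrt(2 * pi) ~= 0.3989.
    print(_UpperCAmelCase(0.0 ) )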
| 82 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args (unknown_args ):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
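    # Hedged invocation sketch (subcommand names follow the registrations above;
    # exact flags vary by release):
    #   datasets-cli env
    #   datasets-cli test ./my_dataset --all_configs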
| 34 | 0 |
'''simple docstring'''
def _a ( discount_rate , cash_flows ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
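    # A minimal usage sketch (hedged): 100 received one period out, discounted at
    # 10%, is worth 100 / 1.1 ~= 90.91 today (the index-0 flow is undiscounted).
    print(_a(0.1 , [0, 100] ) )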
| 13 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class _A ( __lowercase ):
lowercase__: str = '''codegen'''
lowercase__: Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
__snake_case : List[str] = vocab_size
__snake_case : Union[str, Any] = n_ctx
__snake_case : int = n_positions
__snake_case : str = n_embd
__snake_case : Dict = n_layer
__snake_case : List[Any] = n_head
__snake_case : Any = n_inner
__snake_case : str = rotary_dim
__snake_case : List[str] = activation_function
__snake_case : Tuple = resid_pdrop
__snake_case : Dict = embd_pdrop
__snake_case : int = attn_pdrop
__snake_case : Tuple = layer_norm_epsilon
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = use_cache
__snake_case : Dict = bos_token_id
__snake_case : Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ )
class _A ( __lowercase ):
def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
# TODO: how to do that better?
__snake_case : List[str] = 0
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
__snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._config.n_head
def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
__snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case : Tuple = seqlen + 2
__snake_case : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[str] = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
__snake_case : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
__snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__snake_case : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return 13
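# Hedged usage sketch: under the upstream (non-obfuscated) names this pairing is
# roughly CodeGenOnnxConfig(CodeGenConfig(), task="default"), whose
# generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH) returns the
# ordered input_ids / past_key_values / attention_mask mapping built above.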
| 13 | 1 |
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
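    # A second sketch (hedged): lists/tuples are also accepted; the union keeps
    # duplicates unless alternative_union=True, so this prints 2 / 4 = 0.5.
    print(jaccard_similarity(["a", "b", "c"], ["b", "c", "d"]))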
| 156 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowerCAmelCase : Optional[int] = ["bert-base-uncased", "bert-base-cased"]
__lowerCAmelCase : List[str] = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class __lowerCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self : Any , _snake_case : str ):
super().__init__()
__lowercase : str = tokenizer
__lowercase : Any = AutoConfig.from_pretrained(_snake_case )
__lowercase : Union[str, Any] = TFAutoModel.from_config(_snake_case )
def snake_case_ ( self : str , _snake_case : int ):
__lowercase : Optional[Any] = self.tokenizer(_snake_case )
__lowercase : int = self.bert(**_snake_case )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : int ):
super().setUp()
__lowercase : Optional[int] = [
BertTokenizer.from_pretrained(_snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__lowercase : Optional[Any] = [TFBertTokenizer.from_pretrained(_snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_snake_case , use_fast_bert_tokenizer=_snake_case )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowercase : Optional[int] = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowercase : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def snake_case_ ( self : List[str] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowercase : Dict = tokenizer(_snake_case , return_tensors='''tf''' , padding='''longest''' )
__lowercase : int = tf_tokenizer(_snake_case )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Union[str, Any] = tf_tokenizer(self.paired_sentences )
__lowercase : List[str] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def snake_case_ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Any = tf.function(_snake_case )
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowercase : List[Any] = tf.constant(_snake_case )
__lowercase : Any = compiled_tokenizer(_snake_case )
__lowercase : Union[str, Any] = tf_tokenizer(_snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case_ ( self : Tuple ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Any = ModelToSave(tokenizer=_snake_case )
__lowercase : str = tf.convert_to_tensor(self.test_sentences )
__lowercase : Union[str, Any] = model(_snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase : Union[str, Any] = Path(_snake_case ) / '''saved.model'''
model.save(_snake_case )
__lowercase : List[str] = tf.keras.models.load_model(_snake_case )
__lowercase : Tuple = loaded_model(_snake_case )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 156 | 1 |
def lowerCamelCase__ (matrix):
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows , columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
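    # A minimal usage sketch (hedged): the second row is twice the first, so the
    # Gaussian elimination above reports rank 1.
    print(lowerCamelCase__([[1.0, 2.0], [2.0, 4.0]]))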
| 363 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _snake_case ( unittest.TestCase ):
_lowercase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : int = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=a , tokenizer=a)
return generator, ["Something to write", "Something else"]
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Any:
SCREAMING_SNAKE_CASE = generator('Something there')
self.assertEqual(a , [{'generated_text': ANY(a)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
SCREAMING_SNAKE_CASE = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
SCREAMING_SNAKE_CASE = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
with self.assertRaises(a):
generator(4)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = generator(
'Something there' , num_return_sequences=a , num_beams=a , )
SCREAMING_SNAKE_CASE = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(a , a)
SCREAMING_SNAKE_CASE = generator('This is a test' , do_sample=a , num_return_sequences=2 , return_tensors=a)
self.assertEqual(
a , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = generator(
['This is a test', 'This is a second test'] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
| 327 | 0 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any=0 ) -> Any:
"""simple docstring"""
if name is None:
snake_case = None
else:
snake_case = "." * max(0 , spaces - 2 ) + "# {:" + str(5_0 - spaces ) + "s}"
snake_case = fmt.format(_lowerCAmelCase )
# Print and recurse (if needed).
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if msg is not None:
print(_lowerCAmelCase )
for k in val.keys():
recursive_print(_lowerCAmelCase , val[k] , spaces + 2 )
elif isinstance(_lowerCAmelCase , torch.Tensor ):
print(_lowerCAmelCase , ':' , val.size() )
else:
print(_lowerCAmelCase , ':' , _lowerCAmelCase )
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Dict:
"""simple docstring"""
snake_case = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case = param.view(*_lowerCAmelCase )
snake_case = param.transpose(0 , 2 )
snake_case = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case = param.view(*_lowerCAmelCase )
snake_case = param.transpose(0 , 1 ).contiguous()
snake_case = param.view(*_lowerCAmelCase )
return param
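# Shape sketch (hedged, illustrative numbers only): with num_splits=3, num_heads=2,
# hidden_size=4 and checkpoint_version >= 2.0, a [2 * 3 * 4, 8] Megatron QKV weight
# keeps its [24, 8] shape but its rows are regrouped from head-major to split-major
# (Q/K/V-contiguous) order, before the call site transposes it into the
# transformers GPT-2 Conv1D layout.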
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
snake_case = {}
# old versions did not store training args
snake_case = input_state_dict.get('args' , _lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case = ds_args.padded_vocab_size
snake_case = ds_args.max_position_embeddings
snake_case = ds_args.hidden_size
snake_case = ds_args.num_layers
snake_case = ds_args.num_attention_heads
snake_case = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case = config.n_head
# The hidden_size per head.
snake_case = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case = input_state_dict["checkpoint_version"]
else:
snake_case = 0.0
# The model.
snake_case = input_state_dict["model"]
# The language model.
snake_case = model["language_model"]
# The embeddings.
snake_case = lm["embedding"]
# The word embeddings.
snake_case = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
snake_case = word_embeddings[: config.vocab_size, :]
snake_case = word_embeddings
# The position embeddings.
snake_case = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
snake_case = pos_embeddings
# The transformer.
snake_case = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
snake_case = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
snake_case = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case = layer_re.match(_lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case = int(m.group(1 ) )
# The name of the operation.
snake_case = m.group(2 )
# Is it a weight or a bias?
snake_case = m.group(3 )
# The name of the layer.
snake_case = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
snake_case = "ln_1" if op_name.startswith('input' ) else "ln_2"
snake_case = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _lowerCAmelCase , _lowerCAmelCase )
snake_case = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case = torch.tensor(-1e4 , dtype=torch.floataa )
snake_case = masked_bias
snake_case = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Store. No change of shape.
snake_case = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case = megatron_to_transformers[op_name]
snake_case = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case = megatron_to_transformers[op_name]
snake_case = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case = transformer["final_layernorm.weight"]
snake_case = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
snake_case = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_lowerCAmelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_lowerCAmelCase , help='An optional config json file describing the pre-trained model.' , )
snake_case = parser.parse_args()
# Extract the basename.
snake_case = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
snake_case = torch.load(_lowerCAmelCase , map_location='cpu' )
else:
snake_case = torch.load(args.path_to_checkpoint , map_location='cpu' )
snake_case = input_state_dict.get('args' , _lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case = "gelu_fast"
elif ds_args.openai_gelu:
snake_case = "gelu_new"
else:
snake_case = "gelu"
else:
# in the very early days this used to be "gelu_new"
snake_case = "gelu_new"
# Spell out all parameters in case the defaults change.
snake_case = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=_lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_lowerCAmelCase , summary_activation=_lowerCAmelCase , summary_proj_to_labels=_lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCAmelCase , use_cache=_lowerCAmelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case = GPTaConfig.from_json_file(args.config_file )
snake_case = ["GPT2LMHeadModel"]
# Convert.
print('Converting' )
snake_case = convert_megatron_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCAmelCase , _lowerCAmelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
snake_case = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
snake_case = "gpt2"
snake_case = AutoTokenizer.from_pretrained(_lowerCAmelCase )
snake_case = type(_lowerCAmelCase ).__name__
snake_case = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_lowerCAmelCase )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_lowerCAmelCase )
# Store the state_dict to file.
snake_case = os.path.join(_lowerCAmelCase , 'pytorch_model.bin' )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
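# Hedged invocation sketch (paths are placeholders):
#   PYTHONPATH=/path/to/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/checkpoint.zip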
| 150 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_electra_fast"""] = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_electra"""] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_electra"""] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_electra"""] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
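    # Lazy-import note: _LazyModule defers the heavy torch/tf/flax submodules
    # declared in _import_structure until an attribute (e.g. ElectraModel) is
    # first accessed.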
| 52 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self : str , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=13 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : int=5 , lowerCamelCase__ : List[str]=4 , lowerCamelCase__ : int=37 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=512 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : Union[str, Any]=None , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Optional[int] = seq_length
UpperCamelCase__ : str = is_training
UpperCamelCase__ : int = use_input_mask
UpperCamelCase__ : Dict = use_token_type_ids
UpperCamelCase__ : Optional[int] = use_labels
UpperCamelCase__ : Tuple = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Optional[int] = num_hidden_layers
UpperCamelCase__ : Union[str, Any] = num_attention_heads
UpperCamelCase__ : Optional[Any] = intermediate_size
UpperCamelCase__ : Optional[int] = hidden_act
UpperCamelCase__ : Tuple = hidden_dropout_prob
UpperCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = max_position_embeddings
UpperCamelCase__ : Union[str, Any] = type_vocab_size
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : List[Any] = num_labels
UpperCamelCase__ : Dict = num_choices
UpperCamelCase__ : Tuple = scope
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : str = None
if self.use_input_mask:
UpperCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Dict = None
if self.use_token_type_ids:
UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : Any = None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Tuple = None
if self.use_labels:
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = BioGptModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : str = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
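# Editor's note on test_batch_generation above (sketch, not part of the original test
# suite): decoder-only models must be padded on the left, otherwise freshly generated
# tokens would be conditioned on pad positions. The three settings that make batched
# generation work are:
#
#     tokenizer.padding_side = "left"
#     tokenizer.pad_token = tokenizer.eos_token
#     model.config.pad_token_id = model.config.eos_token_id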
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
a_ = 'Tobias Carryer'
from time import time
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=int(time() ) ) -> Optional[int]: # noqa: B008
__lowercase : str = multiplier
__lowercase : int = increment
__lowercase : str = modulo
__lowercase : Dict = seed
def _lowerCamelCase ( self ) -> str:
__lowercase : Tuple = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a_ = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
while True:
print(lcg.next_number())
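# Editor's sketch (not part of the original file): 1664525 and 1013904223 are the
# classic multiplier/increment pair from Numerical Recipes for a modulus of 2**32.
# Two generators with identical parameters and seed emit identical streams, which
# the assertion below demonstrates with an arbitrary seed of 42:
def _demo_lcg_determinism():
    a = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
    b = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
    assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]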
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example's content, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in a file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the example's hash is still in the set of unique hashes; consume it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
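# Editor's illustration of the exact-dedup contract above (hypothetical hashes):
# the first sighting of a hash keeps the example and consumes the hash, so any
# later duplicate is dropped.
def _demo_check_uniques():
    uniques = {"abc", "def"}
    assert check_uniques({"hash": "abc"}, uniques) is True  # first occurrence: keep
    assert check_uniques({"hash": "abc"}, uniques) is False  # duplicate: drop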
def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking at its first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file.
    2. counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a Python file contains none of: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of a file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
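# Editor's note: the saving loop below writes shards named with zero-padded indices,
# e.g. file-000000000001.json, which compress_file then turns into
# file-000000000001.json.gz before deleting the uncompressed shard.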
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 286 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; a weight of 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """
    weights - int list
    possible values - 0 / 1
    0 if lower values have higher weight in the data set
    1 if higher values have higher weight in the data set
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
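# Editor's sketch of the full pipeline on toy data (illustrative values):
# procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
# appends one combined score per row; e.g. the first row becomes [20, 60, 2012, 2.0]
# (column scores 1.0 + 0.75 + 0.25 under the min-max rules above).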
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """The smallest number generated is 0; the largest is modulo - 1."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[type] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Preprocess `dataset_path` and remove the remote filesystem prefix (e.g. ``s3://``)."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether `fs` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename `src` to `dst` on the filesystem `fs`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear references to fsspec's event loop and thread, otherwise HTTPFileSystem can hang in training loops."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
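# Editor's sketch of the path helper above (hypothetical paths): the scheme prefix is
# stripped from remote URIs while local paths pass through unchanged. A filesystem is
# considered "remote" whenever its protocol is not "file".
def _demo_filesystem_helpers():
    assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
    assert extract_path_from_uri("/local/path/data") == "/local/path/data"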
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
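# Example usage (editor's note): `dep_version_check("tokenizers")` re-checks the pin
# recorded in dependency_versions_table at runtime and raises if the installed version
# does not satisfy it; `hint` lets the caller add extra install instructions to the error.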
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
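# Editor's illustration of the variation expansion described in the header comment
# (hypothetical helper, not used by the tool itself):
def _expand_variations_example():
    dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
    # -> ["--tf32 0", "--tf32 0 --fp16", "--tf32 0 --bf16",
    #     "--tf32 1", "--tf32 1 --fp16", "--tf32 1 --bf16"]
    return [" ".join(v).strip() for v in itertools.product(*dims)]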
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` characters."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "

        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
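# Editor's example of the rewrite above (made-up command): a base cmd of
# "run.py --output_dir old_dir --lr 1e-4" becomes
# [sys.executable, "run.py", "--lr", "1e-4", "--output_dir", "<output_dir>", "--overwrite_output_dir"],
# so the benchmark always controls where results are written.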
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
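# Editor's note on the diff column above (numbers from the header example): with the
# baseline at 285.11 samples/sec, a variation reaching 342.09 is reported as
# round(100 * (342.09 - 285.11) / 285.11) = 20 percent.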
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """
    A Prior Transformer model that predicts CLIP image embeddings from CLIP text embeddings.
    """
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns a dictionary of all attention processors used in the model, indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
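# Editor's sketch of the additive causal mask registered in __init__ above
# (standalone, hypothetical size):
def _causal_mask_example(n: int = 4) -> torch.Tensor:
    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)  # zero on/below the diagonal; -1e4 above blocks attention to future positions
    return mask[None, ...]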
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
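    # Editor's worked example of the id alignment above: spm assigns "," the piece id 3;
    # adding fairseq_offset = 12 maps it to embedding id 15, leaving ids 0-4 for the
    # special tokens and 5-14 for [unused0]..[unused9].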
def __getstate__( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
return state
def __setstate__( self : Any , __lowerCamelCase : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return ([0] * len(__lowerCamelCase )) + [1]
return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : int ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset
def lowercase_ ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : List[Any] , __lowerCamelCase : str ) -> str:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = ''''''.join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
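# A minimal, self-contained sketch of the layouts the methods above produce for a
# sequence pair (the ids below are placeholders, not entries from a real vocabulary):
if __name__ == "__main__":
    token_ids_0, token_ids_1, sep_id = [10, 11], [12, 13, 14], 2
    pair = token_ids_0 + [sep_id] + token_ids_1 + [sep_id]  # build_inputs_with_special_tokens
    mask = [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]  # get_special_tokens_mask
    assert len(pair) == len(mask) == 7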
| 314 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
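# Worked example of the shortest-edge resize implemented above: a 30 x 60 (w x h) image
# with shortest_edge = 18 keeps its aspect ratio, so width -> 18 and height -> int(18 * 60 / 30) = 36;
# in the batched case each dimension then takes the maximum over the batch.
assert (int(18 * 60 / 30), 18) == (36, 18)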
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
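# A hedged usage sketch outside the test harness (assumes network access to the same
# "hustvl/yolos-small" checkpoint exercised by the slow tests above; "cat.png" is a
# placeholder path, not a file shipped with this repository):
#
#   image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   inputs = image_processing(images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, H, W]) after resize + pad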
| 314 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
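# A minimal sketch of what the lazy structure above enables (an illustration, not part
# of this module): importing the package stays cheap, and the heavy torch/TF submodules
# load only when a name registered in _import_structure is first accessed, e.g.
#
#   from transformers.models.lxmert import LxmertConfig  # resolved lazily via _LazyModule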
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
a_ = namedtuple("covid_data", "cases deaths recovered")
def a__ ( __lowercase = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
_A = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(__lowercase ).content ).xpath(__lowercase ) )
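# A hedged usage sketch (performs a live HTTP request, so the figures change constantly):
#
#   stats = covid_stats()
#   print(stats.cases, stats.deaths, stats.recovered)  # fields of the namedtuple above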
a_ = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats())) | 163 | 1 |