import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for the Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state) from a pre-defined JSON config file."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Save the scheduler configuration to a directory so it can be reloaded with `from_pretrained`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative
    product of (1 - beta) over time from t = [0, 1].
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
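
# Usage sketch (not part of the original module; the schedule constants and the
# (2, 3, 8, 8) sample shape below are illustrative assumptions): build a
# CommonSchedulerState by hand from a linear beta schedule and forward-diffuse a
# batch of samples with add_noise_common.
#
#     import jax
#
#     betas = jnp.linspace(1e-4, 2e-2, 1000, dtype=jnp.float32)
#     alphas = 1.0 - betas
#     state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
#
#     key, noise_key = jax.random.split(jax.random.PRNGKey(0))
#     samples = jax.random.normal(key, (2, 3, 8, 8))
#     noise = jax.random.normal(noise_key, (2, 3, 8, 8))
#     timesteps = jnp.array([10, 500])  # one timestep per batch element
#     noisy = add_noise_common(state, samples, noise, timesteps)  # same shape as `samples`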
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
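
# Example invocation (illustrative; the script filename and local paths are
# assumptions, not taken from the file above):
#
#     python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./vae_onnx --opset 14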
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
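
# Usage sketch (illustrative, not part of the original module): instantiate a config
# with integer quantization enabled and inspect the dynamic ONNX input axes. The
# default OnnxConfig constructor signature is assumed here.
#
#     config = IBertConfig(quant_mode=True)
#     onnx_config = IBertOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])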
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Recursively explore the matrix and track the side length of the largest
    square of 1s seen so far (exponential time, no memoization).

    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Same recursion as above, but memoized in a dp array (O(rows * cols) time).

    >>> largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Iterative bottom-up dynamic programming over a (rows + 1) x (cols + 1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up dynamic programming keeping only the current and the next row (O(cols) space).

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy the finished row; aliasing it would corrupt later reads

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
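
# Worked example (illustrative, not from the original file): for the matrix below the
# largest all-ones square has side length 2, and all four implementations agree.
#
#     mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
#     assert largest_square_area_in_matrix_top_down_approach(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat) == 2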
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the string is a valid Sri Lankan mobile phone number.

    >>> is_sri_lankan_phone_number("0094702343221")
    True
    """
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
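
# A few more illustrative cases (not from the original file); the pattern accepts the
# prefixes 0, 94, +94 and 0094, followed by a mobile code 7x where x is not 3 or 9:
#
#     assert is_sri_lankan_phone_number("+94773283048")
#     assert is_sri_lankan_phone_number("0718382399")
#     assert not is_sri_lankan_phone_number("0712345")       # subscriber part too short
#     assert not is_sri_lankan_phone_number("+94732283048")  # 73 is not an accepted mobile code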
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
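
# Example invocation (illustrative; the script name and model path are assumptions):
#
#     python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict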
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
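
# Example invocation (illustrative; the output path is an assumption): extract the six
# teacher layers [0, 2, 4, 7, 9, 11] of roberta-large into a student checkpoint,
# keeping the vocabulary transform:
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/roberta_student.pth --vocab_transform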
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """
    Build and simulate a quantum full adder: the two measured classical bits are the
    sum and the carry-out of input_1 + input_2 + carry_in. An input of 2 puts the
    corresponding qubit into superposition via a Hadamard gate.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # the UNet weights can exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, clip_num_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
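
# Example invocation (illustrative; the script filename and checkpoint id are assumptions):
#
#     python convert_stable_diffusion_checkpoint_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx \
#         --opset 14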
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
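
# To run just this file (illustrative; the path assumes the usual transformers test layout):
#
#     python -m pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py -v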
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_a = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def _a ( SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase: str = list(s_dict.keys() )
for key in keys:
__lowerCAmelCase: int = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__lowerCAmelCase: List[str] = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''{key} -> {new_key}''' )
__lowerCAmelCase: Tuple = s_dict.pop(SCREAMING_SNAKE_CASE )
return s_dict
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = emb.weight.shape
__lowerCAmelCase: Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = emb.weight.data
return lin_layer
def _download ( url : str , root : str ) -> bytes:
    """Download `url` into `root`, verifying the SHA-256 checksum embedded in the URL path."""
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , 'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=True , unit_divisor=10_24 ) as loop:
            while True:
                buffer = source.read(81_92 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , 'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
    return model_bytes
def convert_openai_whisper_to_tfms ( checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    """Convert an OpenAI Whisper checkpoint (a local path or a model name) to the Transformers format."""
    if ".pt" not in checkpoint_path:
        import io  # local import; `_download` returns the raw checkpoint bytes
        model_bytes = _download(_MODELS[checkpoint_path] , '.' )  # cache downloads in the working directory
        original_checkpoint = torch.load(io.BytesIO(model_bytes ) , map_location='cpu' )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_a = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322 | 1 |
from __future__ import annotations
RADIX = 1_0
def radix_sort ( list_of_ints : list[int] ) -> list[int]:
    """Sort non-negative integers with a least-significant-digit radix sort."""
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
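# Hedged sanity check of the radix sort above: one stable bucket pass per
# base-10 digit, least significant first, so later passes preserve the order
# established by earlier ones.
assert radix_sort([1_7_0, 4_5, 7_5, 9_0, 8_0_2, 2_4, 2, 6_6] ) == [2, 2_4, 4_5, 6_6, 7_5, 9_0, 1_7_0, 8_0_2]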
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that the fake head request was indeed called
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
        # Test an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
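# Hedged sketch (the helper name below is hypothetical): `update_from_string`,
# exercised above, parses "k=v,k2=v2" pairs and coerces each value to the type
# of the attribute it replaces. Note bool must be checked before int, since
# bool is a subclass of int; the real implementation handles more edge cases.
def _update_from_string_sketch(obj , update: str ) -> None:
    for pair in update.split(',' ):
        key , value = pair.split('=' )
        current = getattr(obj , key )
        if isinstance(current , bool ):
            setattr(obj , key , value.lower() == 'true' )
        elif isinstance(current , int ):
            setattr(obj , key , int(value ) )
        elif isinstance(current , float ):
            setattr(obj , key , float(value ) )
        else:
            setattr(obj , key , value )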
| 322 | 1 |
from __future__ import annotations
__author__ = '''Muhammad Umer Farooq'''
__license__ = '''MIT'''
__version__ = '''1.0.0'''
__maintainer__ = '''Muhammad Umer Farooq'''
__email__ = '''[email protected]'''
__status__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser ( HTMLParser ):
    def __init__( self : List[Any] , domain : str ) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag( self : Tuple , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor just '#', process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name ( url : str ) -> str:
    """Return the registered domain of `url`, e.g. 'github.com' for 'https://docs.github.com'."""
    return ".".join(get_sub_domain_name(url ).split('.' )[-2:] )
def get_sub_domain_name ( url : str ) -> str:
    """Return the full network location (sub-domain) of `url`."""
    return parse.urlparse(url ).netloc
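# Quick self-check of how the two helpers compose: `urlparse` yields the full
# network location, and keeping the last two dot-separated labels gives the
# registered domain.
assert get_sub_domain_name('https://docs.github.com/en' ) == 'docs.github.com'
assert get_domain_name('https://docs.github.com/en' ) == 'github.com'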
def emails_from_url ( url : str = "https://github.com" ) -> list[str]:
    """Crawl the given page and return a sorted list of e-mail addresses found on linked pages."""
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
_a = emails_from_url('''https://github.com''')
print(f"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
| 322 |
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number ( number : int ) -> int:
    """Return the sum of the squared digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Speed is increased slightly by checking every 5 digits together via the table.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
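# Why five-digit chunks work: the table already stores the digit-square sum of
# every value below 100000, so summing the table entries of each 5-digit slice
# equals summing the squared digits of the whole number.
assert DIGITS_SQUARED[9_8_7_6_5] + DIGITS_SQUARED[4_3_2_1_0] == sum(int(c) ** 2 for c in '9876543210' )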
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def chain ( number : int ) -> bool:
    """Return True if the digit-square chain starting at `number` ends in 1, memoizing results."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution ( number : int = 10_00_00_00 ) -> int:
    """Count how many chain starting points below `number` arrive at 89 (Project Euler 92)."""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 322 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
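# Hedged sketch (function name hypothetical) of the sampling pattern that the
# full-loop tests above exercise: set the timesteps once, then alternate a
# noise prediction with a scheduler step. The "model" is a stand-in that
# predicts zero noise, so this only demonstrates the control flow.
def _sampling_loop_sketch(num_inference_steps: int = 5 ):
    scheduler = DPMSolverSinglestepScheduler()
    sample = torch.ones(1 , 3 , 8 , 8 )  # stand-in initial noise
    scheduler.set_timesteps(num_inference_steps )
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample )  # stand-in epsilon prediction
        sample = scheduler.step(residual , t , sample ).prev_sample
    return sample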
| 322 |
def _a ( number : int ) -> bool:
    """Return True if `number` is automorphic, i.e. its square ends in its own digits."""
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare the trailing digit of the number with that of its square.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
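# Quick self-check: the function accepts exactly the automorphic numbers,
# i.e. those whose square ends in their own digits (5**2 = 25, 76**2 = 5776).
assert all(_a(n ) for n in (0, 1, 5, 6, 2_5, 7_6, 3_7_6, 6_2_5) )
assert not any(_a(n ) for n in (2, 3, 4, 7, 1_0, 1_1) )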
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
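# Hedged usage sketch for the processor class above, kept as comments because
# it needs downloaded weights; the checkpoint id below is one public
# Chinese-CLIP repo and may differ from what a given project uses:
#
#   processor = A_.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16' )
#   inputs = processor(text=['一张猫的照片'] , images=image , return_tensors='pt' )
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values
#   #    from the image processor, ready for the ChineseCLIP model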
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
pass
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
| 322 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 5_1_2,
'''google/fnet-large''': 5_1_2,
}
SPIECE_UNDERLINE = '''▁'''
class A_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: Dict = [self.sep_token_id]
__lowerCAmelCase: List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: int = [self.sep_token_id]
__lowerCAmelCase: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase: Optional[Any] = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 322 |
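# A self-contained sketch of the special-token layout the two methods above
# implement: single sequences become [CLS] A [SEP], pairs become
# [CLS] A [SEP] B [SEP], and token_type_ids mark the second segment with 1s.
# The ids below are placeholders for illustration, not real FNet vocabulary ids.
CLS, SEP = 101, 102  # hypothetical ids

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

def token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)  # covers [CLS] ... [SEP]
    return first if ids_b is None else first + [1] * (len(ids_b) + 1)

assert build_inputs([5, 6]) == [101, 5, 6, 102]
assert build_inputs([5], [7, 8]) == [101, 5, 102, 7, 8, 102]
assert token_type_ids([5], [7, 8]) == [0, 0, 0, 1, 1, 1]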
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = 0
    __lowerCAmelCase: Optional[int] = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
    if len(arr ) <= 1:
        return arr, 0
    __lowerCAmelCase: str = len(arr ) // 2
__lowerCAmelCase: str = arr[0:mid]
__lowerCAmelCase: int = arr[mid:]
    __lowerCAmelCase , __lowerCAmelCase: List[Any] = count_inversions_recursive(p )
    __lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(q )
    __lowerCAmelCase , __lowerCAmelCase: int = _count_cross_inversions(a , b )
__lowerCAmelCase: int = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = []
__lowerCAmelCase: List[str] = 0
    while i < len(p ) and j < len(q ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p)
# These are all inversions. The claim emerges from the
# property that P is sorted.
            num_inversion += len(p ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
    if i < len(p ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    __lowerCAmelCase: Tuple = count_inversions_bf(arr_a )
    __lowerCAmelCase , __lowerCAmelCase: str = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
    __lowerCAmelCase: Tuple = count_inversions_bf(arr_a )
    __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
# an empty list should also have zero inversions
__lowerCAmelCase: int = []
    __lowerCAmelCase: Any = count_inversions_bf(arr_a )
    __lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
if __name__ == "__main__":
main()
| 322 | 1 |
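# For reference, a compact, readably-named version of the divide-and-conquer
# inversion count above (a sketch with the same O(n log n) recursion: sort
# both halves, then count cross inversions during the merge).
def count_inversions(arr):
    if len(arr) <= 1:
        return list(arr), 0
    mid = len(arr) // 2
    left, inv_left = count_inversions(arr[:mid])
    right, inv_right = count_inversions(arr[mid:])
    merged, cross = [], 0
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            cross += len(left) - i  # every remaining left element exceeds right[j]
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged += left[i:] or right[j:]
    return merged, inv_left + inv_right + cross

assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8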
from __future__ import annotations
import time
import numpy as np
_a = [8, 5, 9, 7]
_a = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_a = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A_ :
def __init__( self : Dict , UpperCAmelCase : list[int] , UpperCAmelCase : list[list[int]] , UpperCAmelCase : list[list[int]] , ) -> None:
__lowerCAmelCase: Optional[Any] = claim_vector
__lowerCAmelCase: int = allocated_resources_table
__lowerCAmelCase: List[str] = maximum_claim_table
def UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCAmelCase ( self : Tuple ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCAmelCase ( self : Tuple ) -> list[list[int]]:
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCAmelCase ( self : List[Any] ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
def UpperCAmelCase ( self : List[str] , **UpperCAmelCase : str ) -> None:
__lowerCAmelCase: Optional[Any] = self.__need()
__lowerCAmelCase: List[Any] = self.__allocated_resources_table
__lowerCAmelCase: Dict = self.__available_resources()
__lowerCAmelCase: Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 5_0 + '\n' )
while need_list:
__lowerCAmelCase: Union[str, Any] = False
for each_need in need_list:
__lowerCAmelCase: List[Any] = True
                for index, need in enumerate(each_need ):
if need > available_resources[index]:
__lowerCAmelCase: List[Any] = False
break
if execution:
__lowerCAmelCase: str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCAmelCase: List[Any] = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
                    need_list.remove(each_need )
# update available/freed resources stack
                    __lowerCAmelCase: Optional[Any] = np.array(available_resources ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(UpperCAmelCase ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(UpperCAmelCase ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 |
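# The safety check the class above performs reduces to: repeatedly find a
# process whose remaining need fits in the available vector, pretend it
# finishes (freeing its allocation), and declare the state safe iff every
# process can finish. A minimal sketch with hypothetical numbers -- unlike the
# class, it takes the available vector directly instead of deriving it from a
# claim vector.
def is_safe(available, allocated, maximum):
    need = [[m - a for m, a in zip(mx, al)] for mx, al in zip(maximum, allocated)]
    work, done = list(available), [False] * len(allocated)
    progressed = True
    while progressed:
        progressed = False
        for p in range(len(allocated)):
            if not done[p] and all(n <= w for n, w in zip(need[p], work)):
                work = [w + a for w, a in zip(work, allocated[p])]
                done[p] = progressed = True
    return all(done)

# P0's remaining need (3) fits in the 3 available units; finishing it frees
# enough resources for P1, so the state is safe.
assert is_safe([3], [[2], [1]], [[5], [4]])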
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='dpmsolver++' , solver_order=order , solver_type=solver_type , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                    __lowerCAmelCase: Dict = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        __lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=True )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
        __lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=True )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
        __lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 322 | 1 |
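# The recurring pattern in the tests above -- configure, save, reload, then
# assert identical step outputs -- sketched with a stand-in scheduler whose
# whole state is a JSON-serializable config dict. The class is hypothetical;
# only the round-trip structure mirrors the tests.
import json, os, tempfile

class ToyScheduler:
    def __init__(self, **config):
        self.config = config

    def save_config(self, directory):
        with open(os.path.join(directory, 'scheduler_config.json'), 'w') as f:
            json.dump(self.config, f)

    @classmethod
    def from_pretrained(cls, directory):
        with open(os.path.join(directory, 'scheduler_config.json')) as f:
            return cls(**json.load(f))

    def step(self, sample):
        return sample * (1.0 - self.config['beta_end'])

with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler = ToyScheduler(beta_end=0.02)
    scheduler.save_config(tmpdirname)
    reloaded = ToyScheduler.from_pretrained(tmpdirname)
    assert scheduler.step(10.0) == reloaded.step(10.0)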
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A_ :
def __init__( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : str=3 , UpperCAmelCase : Optional[Any]=3_2 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Optional[Any]=1_0 , UpperCAmelCase : int=[8, 1_6, 3_2, 6_4] , UpperCAmelCase : Any=[1, 1, 2, 1] , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : List[str]="relu" , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Dict=None , UpperCAmelCase : int=["stage2", "stage3", "stage4"] , UpperCAmelCase : Optional[Any]=[2, 3, 4] , UpperCAmelCase : Any=1 , ) -> Optional[int]:
__lowerCAmelCase: List[Any] = parent
__lowerCAmelCase: Optional[int] = batch_size
__lowerCAmelCase: Optional[int] = image_size
__lowerCAmelCase: List[str] = num_channels
__lowerCAmelCase: Any = embeddings_size
__lowerCAmelCase: Optional[Any] = hidden_sizes
__lowerCAmelCase: Union[str, Any] = depths
__lowerCAmelCase: Any = is_training
__lowerCAmelCase: List[str] = use_labels
__lowerCAmelCase: Tuple = hidden_act
__lowerCAmelCase: Tuple = num_labels
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: str = len(UpperCAmelCase )
__lowerCAmelCase: Tuple = out_features
__lowerCAmelCase: Tuple = out_indices
__lowerCAmelCase: str = num_groups
def UpperCAmelCase ( self : Optional[int] ) -> str:
__lowerCAmelCase: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Tuple = None
if self.use_labels:
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase: str = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Dict:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ) -> str:
__lowerCAmelCase: str = BitModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Tuple = model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: int = BitForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
__lowerCAmelCase: Optional[Any] = BitBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase: str = None
__lowerCAmelCase: Union[str, Any] = BitBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: int = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = config_and_inputs
__lowerCAmelCase: List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : int = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: Optional[int] = BitModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : Dict ) -> str:
return
@unittest.skip(reason='Bit does not output attentions' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def UpperCAmelCase ( self : Any ) -> List[str]:
pass
def UpperCAmelCase ( self : List[str] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase: Any = [*signature.parameters.keys()]
__lowerCAmelCase: Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: Dict = model_class(config=UpperCAmelCase )
for name, module in model.named_modules():
if isinstance(UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
def check_hidden_states_output(UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ):
__lowerCAmelCase: Tuple = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCAmelCase: Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__lowerCAmelCase: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase: Any = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Any = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase: List[str] = layer_type
__lowerCAmelCase: Any = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase: Tuple = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase: Dict = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
__lowerCAmelCase: Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self.default_image_processor
__lowerCAmelCase: Dict = prepare_img()
__lowerCAmelCase: int = image_processor(images=UpperCAmelCase , return_tensors='pt' ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(**UpperCAmelCase )
# verify the logits
__lowerCAmelCase: Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
@require_torch
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Any = (BitBackbone,) if is_torch_available() else ()
_lowercase : Any = BitConfig
_lowercase : Dict = False
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[Any] = BitModelTester(self )
| 322 |
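# A quick sketch of the shape arithmetic the hidden-state and backbone checks
# above rely on: with a 32x32 input, the stem reduces resolution by 4x and each
# later stage halves it, so four stages yield 8, 4, 2, 1 -- matching the
# image_size // 4 first map and image_size // 32 final map asserted in the
# tests. Numbers illustrate the test configuration, not the real model.
def stage_resolutions(image_size, num_stages, stem_stride=4):
    res = image_size // stem_stride
    sizes = [res]
    for _ in range(num_stages - 1):
        res //= 2
        sizes.append(res)
    return sizes

assert stage_resolutions(32, 4) == [8, 4, 2, 1]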
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = int(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=3_00 ) -> int:
"""simple docstring"""
return f'''
<div>
{prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
'''
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[str] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            __lowerCAmelCase: List[Any] = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class A_ :
_lowercase : str = 5
_lowercase : str = 0.2
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]:
__lowerCAmelCase: List[str] = total
__lowerCAmelCase: Optional[int] = '' if prefix is None else prefix
__lowerCAmelCase: int = leave
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Optional[Any] = width
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: List[str] = None
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]:
__lowerCAmelCase: int = value
if comment is not None:
__lowerCAmelCase: Any = comment
if self.last_value is None:
__lowerCAmelCase: List[Any] = time.time()
__lowerCAmelCase: Any = value
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = self.warmup
__lowerCAmelCase: List[str] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase: Union[str, Any] = time.time()
__lowerCAmelCase: str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase: int = None
if value >= self.total:
__lowerCAmelCase: Any = self.total
__lowerCAmelCase: str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__lowerCAmelCase: Tuple = value
__lowerCAmelCase: int = current_time
if self.average_time_per_item is None:
__lowerCAmelCase: Optional[int] = 1
else:
__lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
__lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase: Any = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any:
super().__init__(UpperCAmelCase )
__lowerCAmelCase: Tuple = None if column_names is None else [column_names]
__lowerCAmelCase: Union[str, Any] = None
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict:
if self.inner_table is None:
__lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase: Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__lowerCAmelCase: List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase: Tuple = None
self.display()
class A_ ( snake_case__ ):
def __init__( self : Any ) -> List[str]:
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = False
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str:
__lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
__lowerCAmelCase: Optional[int] = 0
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
__lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any:
__lowerCAmelCase: Union[str, Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase: Any = False
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]:
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase: Any = None
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase: Union[str, Any] = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
__lowerCAmelCase: Dict = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]:
if self.training_tracker is not None:
__lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase: List[str] = log['loss']
break
if self.first_column == "Epoch":
__lowerCAmelCase: int = int(state.epoch )
else:
__lowerCAmelCase: Tuple = state.global_step
__lowerCAmelCase: Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
                __lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , k )
__lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , UpperCAmelCase )
__lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase )
__lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase )
__lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase )
__lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase )
__lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__lowerCAmelCase: Tuple = v
else:
__lowerCAmelCase: int = k.split('_' )
__lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase: List[Any] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase: List[str] = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase: str = True
def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = None
| 322 | 1 |
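# A self-contained sketch of the elapsed-time formatting the progress bar
# above uses (H:MM:SS, dropping the hour field when it is zero). The helper
# name is mine; the divmod arithmetic mirrors the first function in this file.
def fmt_time(t):
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'

assert fmt_time(75) == '01:15'
assert fmt_time(3725) == '1:02:05'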
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
"""simple docstring"""
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
return False
return isinstance(SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : bool = True ) -> Any:
"""simple docstring"""
__lowerCAmelCase: str = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase: List[Any] = is_compiled_module(SCREAMING_SNAKE_CASE )
if is_compiled:
__lowerCAmelCase: int = model
__lowerCAmelCase: Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Tuple = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase: Dict = getattr(SCREAMING_SNAKE_CASE , 'forward' )
__lowerCAmelCase: int = model.__dict__.pop('_original_forward' , SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(SCREAMING_SNAKE_CASE , '__wrapped__' ):
__lowerCAmelCase: Optional[Any] = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase: Optional[Any] = forward
if getattr(SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , SCREAMING_SNAKE_CASE ):
convert_model(SCREAMING_SNAKE_CASE , to_transformer_engine=SCREAMING_SNAKE_CASE )
if is_compiled:
__lowerCAmelCase: Optional[Any] = model
__lowerCAmelCase: List[str] = compiled_model
return model
def _a ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@contextmanager
def _a ( **SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
for key, value in kwargs.items():
__lowerCAmelCase: Union[str, Any] = str(SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not hasattr(SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(SCREAMING_SNAKE_CASE , '__name__' ):
__lowerCAmelCase: Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , '__class__' , SCREAMING_SNAKE_CASE )
if hasattr(SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[int] = destination.setdefault(SCREAMING_SNAKE_CASE , {} )
merge_dicts(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = value
return destination
def _a ( SCREAMING_SNAKE_CASE : int = None ) -> bool:
"""simple docstring"""
if port is None:
__lowerCAmelCase: Dict = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 322 |
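# A runnable sketch of the recursive dictionary merge defined above: nested
# dicts from `source` are merged into `destination` key by key instead of
# overwriting the whole subtree.
def merge(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            merge(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination

assert merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}) == {'a': {'x': 1, 'y': 2}, 'b': 3}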
import os
from datetime import datetime as dt
from github import Github
_a = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def _a ( ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: Dict = Github(os.environ['GITHUB_TOKEN'] )
__lowerCAmelCase: Tuple = g.get_repo('huggingface/accelerate' )
__lowerCAmelCase: str = repo.get_issues(state='open' )
for issue in open_issues:
        __lowerCAmelCase: Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
__lowerCAmelCase: Dict = comments[0] if len(SCREAMING_SNAKE_CASE ) > 0 else None
__lowerCAmelCase: Tuple = dt.utcnow()
__lowerCAmelCase: Optional[int] = (current_time - issue.updated_at).days
__lowerCAmelCase: str = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 322 | 1 |
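# The close/stale rules above reduce to a pure decision on two day counts plus
# the label exemption and who commented last. A sketch of that decision table
# (function and value names are mine; thresholds copied from the conditions
# above):
def issue_action(days_since_updated, days_since_creation, exempt, bot_commented_last):
    if exempt:
        return 'keep'
    if bot_commented_last and days_since_updated > 7 and days_since_creation >= 30:
        return 'close'
    if days_since_updated > 23 and days_since_creation >= 30:
        return 'stale-comment'
    return 'keep'

assert issue_action(10, 40, exempt=False, bot_commented_last=True) == 'close'
assert issue_action(30, 40, exempt=False, bot_commented_last=False) == 'stale-comment'
assert issue_action(30, 40, exempt=True, bot_commented_last=False) == 'keep'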
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a = 1_6
_a = 3_2
def _a ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Union[str, Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
        __lowerCAmelCase: str = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        __lowerCAmelCase: Optional[Any] = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase: Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase: Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase: Tuple = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase: Dict = 8
else:
__lowerCAmelCase: Any = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding='longest' , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCAmelCase: List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a = mocked_dataloaders # noqa: F811
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
__lowerCAmelCase: Union[str, Any] = 2
# New Code #
__lowerCAmelCase: Any = int(args.gradient_accumulation_steps )
__lowerCAmelCase: Optional[int] = int(args.local_sgd_steps )
# Initialize accelerator
__lowerCAmelCase: Any = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase: Dict = config['lr']
__lowerCAmelCase: Optional[int] = int(config['num_epochs'] )
__lowerCAmelCase: List[str] = int(config['seed'] )
__lowerCAmelCase: Tuple = int(config['batch_size'] )
__lowerCAmelCase: Union[str, Any] = evaluate.load('glue' , 'mrpc' )
set_seed(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    __lowerCAmelCase: Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase: Tuple = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase: Dict = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
__lowerCAmelCase: Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , local_sgd_steps=SCREAMING_SNAKE_CASE , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = model(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase: int = model(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase: List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
            predictions=predictions , references=references , )
__lowerCAmelCase: Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE )
def _a ( ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument(
        '--local_sgd_steps' , type=int , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowerCAmelCase: Optional[Any] = parser.parse_args()
__lowerCAmelCase: Dict = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 322 |
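# LocalSGD, as used above, lets each worker take `local_sgd_steps` optimizer
# steps and then averages parameters across workers. A framework-free sketch
# of that synchronization rule (workers are plain lists of floats here; the
# real implementation averages via torch.distributed collectives):
def local_sgd_sync(worker_params):
    n = len(worker_params)
    avg = [sum(ps) / n for ps in zip(*worker_params)]
    return [list(avg) for _ in range(n)]  # every worker adopts the average

assert local_sgd_sync([[1.0, 4.0], [3.0, 0.0]]) == [[2.0, 2.0], [2.0, 2.0]]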
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A_ ( snake_case__ ):
_lowercase : Any = 'Wav2Vec2FeatureExtractor'
_lowercase : Optional[int] = 'AutoTokenizer'
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[Any] ) -> Optional[int]:
super().__init__(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[str] = self.feature_extractor
__lowerCAmelCase: Dict = False
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> List[Any]:
try:
return super().from_pretrained(UpperCAmelCase , **UpperCAmelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , UpperCAmelCase , )
__lowerCAmelCase: int = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[str] = WavaVecaCTCTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
def __call__( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase , **UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__lowerCAmelCase: Any = kwargs.pop('raw_speech' )
else:
__lowerCAmelCase: Optional[int] = kwargs.pop('audio' , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = kwargs.pop('sampling_rate' , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = kwargs.pop('text' , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
__lowerCAmelCase: Tuple = args[0]
__lowerCAmelCase: Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__lowerCAmelCase: Any = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
if text is not None:
__lowerCAmelCase: Optional[Any] = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCAmelCase: Optional[int] = encodings['input_ids']
return inputs
def UpperCAmelCase ( self : Any , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[str] = kwargs.pop('input_features' , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = kwargs.pop('labels' , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
__lowerCAmelCase: Any = args[0]
__lowerCAmelCase: List[Any] = args[1:]
if input_features is not None:
__lowerCAmelCase: Optional[int] = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
if labels is not None:
__lowerCAmelCase: str = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__lowerCAmelCase: Tuple = labels['input_ids']
return input_features
def UpperCAmelCase ( self : int , *UpperCAmelCase : int , **UpperCAmelCase : Union[str, Any] ) -> str:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , *UpperCAmelCase : str , **UpperCAmelCase : str ) -> Any:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@contextmanager
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
__lowerCAmelCase: List[Any] = True
__lowerCAmelCase: List[Any] = self.tokenizer
yield
__lowerCAmelCase: Optional[Any] = self.feature_extractor
__lowerCAmelCase: List[str] = False
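# --- illustrative usage (added; sketch only, the model id is an example) ---
# processor = A_.from_pretrained('facebook/wav2vec2-base-960h')
# inputs = processor(audio=waveform, sampling_rate=16_000, text=transcript)
# `inputs` then carries the extracted audio features plus a `labels` entry
# holding the tokenized text, per the `__call__` implementation above.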
| 322 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[int]:
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCAmelCase: List[str] = tf.cast(UpperCAmelCase , tf.floataa )
__lowerCAmelCase: Tuple = tf.cast(self.warmup_steps , tf.floataa )
__lowerCAmelCase: List[str] = global_step_float / warmup_steps_float
__lowerCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
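# --- illustrative sketch (added; standalone, not the class API above) ---
# The warmup phase scales the peak rate by (step / warmup_steps) ** power,
# then hands off to the wrapped decay schedule. In pure Python:
def sketch_warmup_lr(step: int, init_lr: float = 1e-3, warmup_steps: int = 100, power: float = 1.0) -> float:
    if step < warmup_steps:
        # polynomial warmup ramp
        return init_lr * (step / warmup_steps) ** power
    return init_lr  # past warmup, the wrapped decay schedule takes over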
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
__lowerCAmelCase: Optional[int] = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
__lowerCAmelCase: List[Any] = AdamWeightDecay(
            learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_1=SCREAMING_SNAKE_CASE , beta_2=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase: Dict = tf.keras.optimizers.Adam(
            learning_rate=SCREAMING_SNAKE_CASE , beta_1=SCREAMING_SNAKE_CASE , beta_2=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int:
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[Any] = weight_decay_rate
__lowerCAmelCase: List[str] = include_in_weight_decay
__lowerCAmelCase: Optional[Any] = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
def __init__( self : int ) -> List[Any]:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: int = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
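# --- illustrative sketch (added; standalone toy, not the class API above) ---
# The accumulator keeps one buffer per gradient and adds each new gradient in;
# applying the summed gradients and zeroing the buffers is the caller's job.
def sketch_accumulate(buffers: list, new_grads: list) -> list:
    # elementwise running sum; None gradients are passed through untouched
    return [b if g is None else b + g for b, g in zip(buffers, new_grads)]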
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''MaskFormerFeatureExtractor''']
_a = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
_a = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
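# --- note (added) ---
# The _LazyModule shim above defers the heavy torch/vision imports until an
# attribute is first accessed, so importing the package stays cheap while
# TYPE_CHECKING still exposes the real symbols to static analyzers.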
| 322 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=[] ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2
__lowerCAmelCase: str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
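# --- illustrative note (added) ---
# The blending mask relies on numpy's `linear_ramp` padding: padded values
# ramp linearly from `end_values` at the outer edge up to the edge value, e.g.
#     np.pad(np.full(3, 100.0), 2, mode='linear_ramp', end_values=0)
#     -> array([  0.,  50., 100., 100., 100.,  50.,   0.])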
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] ) -> int:
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : [int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCAmelCase: int = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: List[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCAmelCase: List[Any] = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = n % d
return n - divisor
class A_ ( snake_case__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 3_5_0 , ) -> Optional[Any]:
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__lowerCAmelCase: Any = image.crop(UpperCAmelCase )
__lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = to_input.size
__lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCAmelCase: int = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
__lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Optional[int] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCAmelCase: int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 7_5 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 5_0 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_2_8 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , ) -> str:
__lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCAmelCase: str = math.ceil(image.size[0] / tile_size )
__lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size )
__lowerCAmelCase: Optional[Any] = tcx * tcy
__lowerCAmelCase: Tuple = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler'
__lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa )
__lowerCAmelCase: Optional[Any] = pipe.to('cuda' )
__lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE : Tuple ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 322 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , ) -> float:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , ) -> float:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
SCREAMING_SNAKE_CASE , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
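# --- worked examples (added; plain arithmetic matching the formulas above) ---
# simple interest:   10_000 * 0.0005 * 30           -> 15.0
# compound interest: 10_000 * ((1 + 0.05) ** 2 - 1) -> 1_025.0
# APR interest compounds daily: rate / 365 over years * 365 periods.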
| 322 |
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
    __lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
    __lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
    # dp[i][j] is True when some subset of the first i elements sums to j
    __lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        # a sum of 0 is always reachable via the empty subset
        __lowerCAmelCase: Tuple = True
    for i in range(1 , s + 1 ):
        # no positive sum is reachable from zero elements
        __lowerCAmelCase: Any = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # either skip the i-th element ...
            __lowerCAmelCase: Optional[int] = dp[i - 1][j]
            # ... or include it when it fits into the running sum j
            if arr[i - 1] <= j:
                __lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best split puts a reachable subset sum as close to s / 2 as possible
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            __lowerCAmelCase: Tuple = s - 2 * j
            break
return diff
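if __name__ == "__main__":
    # --- illustrative usage (added) ---
    # [1, 6, 11, 5] (sum 23) splits best as {1, 5, 6} vs {11}: difference 1
    print(_a([1, 6, 11, 5]))  # -> 1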
| 322 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_a = 8
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict=BITS ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = x.device
__lowerCAmelCase: List[Any] = (x * 2_55).int().clamp(0 , 2_55 )
__lowerCAmelCase: Dict = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = rearrange(SCREAMING_SNAKE_CASE , 'd -> d 1 1' )
__lowerCAmelCase: Any = rearrange(SCREAMING_SNAKE_CASE , 'b c h w -> b c 1 h w' )
__lowerCAmelCase: List[Any] = ((x & mask) != 0).float()
__lowerCAmelCase: Any = rearrange(SCREAMING_SNAKE_CASE , 'b c d h w -> b (c d) h w' )
__lowerCAmelCase: str = bits * 2 - 1
return bits
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any=BITS ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = x.device
__lowerCAmelCase: List[str] = (x > 0).int()
__lowerCAmelCase: List[str] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE , dtype=torch.intaa )
__lowerCAmelCase: Union[str, Any] = rearrange(SCREAMING_SNAKE_CASE , 'd -> d 1 1' )
__lowerCAmelCase: Union[str, Any] = rearrange(SCREAMING_SNAKE_CASE , 'b (c d) h w -> b c d h w' , d=8 )
__lowerCAmelCase: Optional[Any] = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 2_55).clamp(0.0 , 1.0 )
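# --- illustrative sanity check (added; names are the de-obfuscated ones) ---
# The encoder maps [0, 1] pixels to 8 bit planes valued in {-1, +1}; the
# decoder inverts it, so a round trip is exact up to 8-bit quantization:
#     x = torch.rand(1, 3, 8, 8)
#     assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)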
def _a ( self : Optional[int] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the following.
    # Notation (<variable name> -> <name in paper>):
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCAmelCase: List[Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCAmelCase: Optional[Any] = self.alphas_cumprod[timestep]
__lowerCAmelCase: int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCAmelCase: Union[str, Any] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase: int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCAmelCase: List[str] = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase: Tuple = torch.clamp(SCREAMING_SNAKE_CASE , -scale , SCREAMING_SNAKE_CASE )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCAmelCase: Union[str, Any] = self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCAmelCase: str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase: Optional[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase: Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCAmelCase: Optional[int] = model_output.device if torch.is_tensor(SCREAMING_SNAKE_CASE ) else 'cpu'
__lowerCAmelCase: Union[str, Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise
__lowerCAmelCase: Optional[Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Any="epsilon" , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = torch.split(SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
else:
__lowerCAmelCase: Any = None
# 1. compute alphas, betas
__lowerCAmelCase: List[str] = self.alphas_cumprod[t]
__lowerCAmelCase: Optional[int] = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase: Optional[Any] = 1 - alpha_prod_t
__lowerCAmelCase: List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase: Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase: Optional[Any] = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
__lowerCAmelCase: Union[str, Any] = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase: int = torch.clamp(SCREAMING_SNAKE_CASE , -scale , SCREAMING_SNAKE_CASE )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase: Optional[int] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase: Optional[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase: int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase: Union[str, Any] = 0
if t > 0:
__lowerCAmelCase: List[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=SCREAMING_SNAKE_CASE ).to(model_output.device )
__lowerCAmelCase: Optional[Any] = (self._get_variance(SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
__lowerCAmelCase: Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE )
class A_ ( snake_case__ ):
def __init__( self : Optional[int] , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , UpperCAmelCase : Optional[float] = 1.0 , ) -> str:
super().__init__()
__lowerCAmelCase: List[str] = bit_scale
__lowerCAmelCase: str = (
ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase : Optional[int] = 2_5_6 , UpperCAmelCase : Optional[int] = 2_5_6 , UpperCAmelCase : Optional[int] = 5_0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , **UpperCAmelCase : str , ) -> Union[Tuple, ImagePipelineOutput]:
__lowerCAmelCase: str = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , )
__lowerCAmelCase: str = decimal_to_bits(UpperCAmelCase ) * self.bit_scale
__lowerCAmelCase: str = latents.to(self.device )
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowerCAmelCase: Any = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase: Optional[int] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Tuple = bits_to_decimal(UpperCAmelCase )
if output_type == "pil":
__lowerCAmelCase: Optional[int] = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
| 322 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
    # two-pointer scan; assumes `nums` is sorted in ascending order
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # pair sum too small: advance the left pointer
            __lowerCAmelCase: Tuple = i + 1
        else:
            # pair sum too large: retreat the right pointer
            __lowerCAmelCase: List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Union[str, Any] = GPTSanJapaneseTokenizer
_lowercase : int = False
_lowercase : List[Any] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
super().setUp()
# fmt: off
__lowerCAmelCase: int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
__lowerCAmelCase: Union[str, Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
__lowerCAmelCase: Dict = {'unk_token': '<unk>'}
__lowerCAmelCase: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCAmelCase ) )
def UpperCAmelCase ( self : int , **UpperCAmelCase : Optional[Any] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: Any = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
__lowerCAmelCase: Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self.get_input_output_texts(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
__lowerCAmelCase: List[Any] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
return text, ids
def UpperCAmelCase ( self : List[str] ) -> str:
pass # TODO add if relevant
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: int = self.get_tokenizer()
# Testing tokenization
__lowerCAmelCase: Any = 'こんにちは、世界。 こんばんは、㔺界。'
__lowerCAmelCase: Union[str, Any] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__lowerCAmelCase: Optional[Any] = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids without special tokens
__lowerCAmelCase: List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__lowerCAmelCase: Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids with special tokens
__lowerCAmelCase: Union[str, Any] = tokens + [tokenizer.unk_token]
__lowerCAmelCase: Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__lowerCAmelCase: Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: Tuple = self.get_tokenizer()
# Testing tokenization
__lowerCAmelCase: Union[str, Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__lowerCAmelCase: Optional[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__lowerCAmelCase: List[Any] = tokenizer.encode(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__lowerCAmelCase: Union[str, Any] = 'こんにちは、世界。'
__lowerCAmelCase: Optional[int] = 'こんばんは、㔺界。😀'
__lowerCAmelCase: Any = 'こんにちは、世界。こんばんは、世界。😀'
__lowerCAmelCase: Union[str, Any] = tokenizer.encode(prefix_text + input_text )
__lowerCAmelCase: List[Any] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__lowerCAmelCase: List[Any] = tokenizer.encode(UpperCAmelCase , prefix_text=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tokenizer.decode(UpperCAmelCase )
__lowerCAmelCase: str = tokenizer.decode(UpperCAmelCase )
__lowerCAmelCase: Dict = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: Any = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__lowerCAmelCase: Optional[Any] = 'こんにちは、世界。'
__lowerCAmelCase: List[str] = 'こんばんは、㔺界。😀'
__lowerCAmelCase: str = len(tokenizer.encode(UpperCAmelCase ) ) - 2
__lowerCAmelCase: Union[str, Any] = len(tokenizer.encode(UpperCAmelCase ) ) - 2
__lowerCAmelCase: Dict = [1] + [0] * (len_prefix + len_text + 1)
__lowerCAmelCase: Dict = [1] * (len_prefix + len_text + 1) + [0]
__lowerCAmelCase: str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__lowerCAmelCase: Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__lowerCAmelCase: Optional[int] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__lowerCAmelCase: int = tokenizer(UpperCAmelCase , prefix_text=UpperCAmelCase ).token_type_ids
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__lowerCAmelCase: Union[str, Any] = tokenizer.encode('あンいワ' )
__lowerCAmelCase: Union[str, Any] = tokenizer.encode('' , prefix_text='あンいワ' )
__lowerCAmelCase: int = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase: Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__lowerCAmelCase: List[str] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__lowerCAmelCase: Optional[int] = tokenizer(UpperCAmelCase , padding=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tokenizer.batch_encode_plus(UpperCAmelCase , padding=UpperCAmelCase )
# fmt: off
__lowerCAmelCase: Union[str, Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__lowerCAmelCase: List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__lowerCAmelCase: Any = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Dict ) -> str:
# tokenizer has no padding token
pass
| 322 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a = '''scheduler_config.json'''
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 1
_lowercase : Tuple = 2
_lowercase : Dict = 3
_lowercase : int = 4
_lowercase : Optional[Any] = 5
@dataclass
class A_ ( snake_case__ ):
_lowercase : jnp.ndarray
class A_ :
_lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
_lowercase : Dict = ['dtype']
_lowercase : int = []
_lowercase : Union[str, Any] = True
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
__lowerCAmelCase: Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : str ) -> Dict:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls : Optional[int] ) -> Any:
__lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' )[0] )
__lowerCAmelCase: Dict = [
getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase )
]
return compatible_classes
def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(SCREAMING_SNAKE_CASE : str ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowerCAmelCase: str = []
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class A_ :
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any:
__lowerCAmelCase: str = scheduler.config
if config.trained_betas is not None:
__lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: List[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
__lowerCAmelCase: Optional[Any] = 1.0 - betas
__lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 )
return cls(
alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , )
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = state.alphas_cumprod
__lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5
__lowerCAmelCase: Any = sqrt_alpha_prod.flatten()
__lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
__lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten()
__lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
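# --- illustrative note (added) ---
# The last helper computes the v-prediction target
#     v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0,
# the counterpart of the forward noising rule implemented just above,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.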
| 322 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_a = ''''''
_a = ''''''
_a = ''''''
_a = 1 # (0 is vertical, 1 is horizontal)
def _a ( ) -> None:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Any = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print('Processing...' )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Dict = update_image_and_anno(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index, image in enumerate(SCREAMING_SNAKE_CASE ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__lowerCAmelCase: Union[str, Any] = random_chars(32 )
__lowerCAmelCase: Tuple = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
__lowerCAmelCase: Optional[Any] = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''{file_root}.jpg''' , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(SCREAMING_SNAKE_CASE )} with {file_name}''' )
__lowerCAmelCase: Optional[int] = []
for anno in new_annos[index]:
__lowerCAmelCase: Optional[int] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(SCREAMING_SNAKE_CASE )
        with open(f'''{file_root}.txt''' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[list, list]:
"""simple docstring"""
__lowerCAmelCase: Any = []
__lowerCAmelCase: Optional[int] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '*.txt' ) ):
__lowerCAmelCase: Union[str, Any] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
__lowerCAmelCase: Tuple = in_file.readlines()
__lowerCAmelCase: Tuple = os.path.join(SCREAMING_SNAKE_CASE , f'''{label_name}.jpg''' )
__lowerCAmelCase: str = []
for obj_list in obj_lists:
__lowerCAmelCase: int = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def _a ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int = 1 ) -> tuple[list, list, list]:
"""simple docstring"""
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: List[str] = []
for idx in range(len(SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase: Any = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = anno_list[idx]
__lowerCAmelCase: List[Any] = cva.imread(SCREAMING_SNAKE_CASE )
if flip_type == 1:
__lowerCAmelCase: Dict = cva.flip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__lowerCAmelCase: Any = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__lowerCAmelCase: Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__lowerCAmelCase: List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE )
new_imgs_list.append(SCREAMING_SNAKE_CASE )
return new_imgs_list, new_annos_lists, path_list
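# --- note (added) ---
# YOLO annotations store normalized box centers, so flipping an image only
# requires mirroring one coordinate: x_center -> 1 - x_center for a horizontal
# flip, y_center -> 1 - y_center for a vertical one; widths and heights are
# unchanged, as the two branches above implement.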
def _a ( SCREAMING_SNAKE_CASE : int = 32 ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__lowerCAmelCase: Tuple = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 322 |
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
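# --- note (added) ---
# Both searches above pop from the front of a Python list, which is O(n) per
# dequeue; a drop-in collections.deque keeps each dequeue O(1), e.g.:
#     from collections import deque
#     queue = deque([[start]]); path = queue.popleft()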
| 322 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[float] ) -> bool:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
__lowerCAmelCase: Dict = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
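    # --- illustrative usage (added) ---
    print(_a([3, 4, 5]))  # -> True  (5 < 3 + 4)
    print(_a([1, 1, 3]))  # -> False (3 >= 1 + 1)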
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
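# A hypothetical end-to-end sketch (the class above mirrors transformers'
# LayoutLMv3Processor; the checkpoint id and variable names are illustrative):
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding now holds "input_ids", "bbox", "attention_mask" and "pixel_values"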
| 322 | 1 |
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """simple docstring"""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """simple docstring"""
    end = len(s) // 2
    n = len(s)
    # We only need to traverse up to half of the string: the i-th character from the
    # start must match the i-th character from the end, i.e. s[i] == s[n - i - 1].
    # eg: for indices [0,1,2,3,4,5], index 4 is checked together with index 1
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    """simple docstring"""
    if len(s) <= 1:  # empty and single-character strings are palindromes
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """simple docstring"""
    return s == s[::-1]
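# Quick illustration of the slice idiom (inputs taken from `test_data` above):
# >>> "rotor"[::-1]
# 'rotor'
# >>> "String"[::-1]
# 'gnirtS'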
def benchmark_function(name: str) -> None:
    """simple docstring"""
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 322 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del vae_decoder
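# Note (an assumption about the checkpoint, not checked here): Stable Diffusion's VAE
# downsamples by a factor of 8, so the dummy 25x25 latent above corresponds to a 200x200
# pixel decode; the batch and spatial axes are exported as dynamic, so other resolutions
# remain usable at inference time.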
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
| 322 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_a = logging.getLogger(__name__)
@dataclass
class A_(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'Whether to use adafactor.'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'}, )
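# A minimal parsing sketch (assumes the standard HfArgumentParser flow; "--output_dir"
# is the only required TrainingArguments field):
#   from transformers import HfArgumentParser
#   (training_args,) = HfArgumentParser(A_).parse_args_into_dataclasses(["--output_dir", "out"])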
| 322 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot this row before moving up; plain aliasing would let the next pass
        # read values from the current row instead of the row below
        next_row = current_row.copy()
    return largest_square_area
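# Worked illustration (matches the demo below): for mat = [[1, 1], [1, 1]] the bottom-up DP
# fills dp[1][1] = dp[1][0] = dp[0][1] = 1 and then dp[0][0] = 1 + min(1, 1, 1) = 2, so the
# largest all-ones square has side length 2.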
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_git'''] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
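    # Note: _LazyModule defers the heavy submodule imports until an attribute is first
    # accessed, so `import transformers.models.git` stays cheap even when torch is installed.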
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path: str, strict: bool, opset: int) -> None:
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to a sorted list
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops, sep='\n')
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
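        # Example invocation (the script filename is hypothetical):
        #   python check_saved_model_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict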
| 322 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` still works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 322 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less than or equal to 2.')
    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
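# Expected behaviour for definite inputs (a sketch, not asserted anywhere): 1 + 1 with a
# carry-in of 1 is binary 11, so all 1_000 shots land on the bitstring '11' (qubit 3 is the
# carry-out, qubit 2 the sum), i.e. roughly {'11': 1000}; an input of 2 applies a Hadamard
# instead, putting that input in superposition and splitting the counts across outcomes.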
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv3_fast'''] = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv3'''] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_layoutlmv3'''] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv3'''] = ['''LayoutLMv3FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv3'''] = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase: str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[Any] = bbox[i, j, 3]
__lowerCAmelCase: Tuple = bbox[i, j, 1]
__lowerCAmelCase: Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Any = bbox[i, j, 2]
__lowerCAmelCase: int = bbox[i, j, 0]
__lowerCAmelCase: int = tmp_coordinate
__lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase )
__lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase: int = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase: str = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
__lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
__lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
__lowerCAmelCase: Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any:
__lowerCAmelCase: str = 2
__lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs
__lowerCAmelCase: List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : Tuple = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : Tuple = False
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]:
return True
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict:
__lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: int = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self )
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self : Dict ) -> Tuple:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> List[str]:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[str]:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def _a ( ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : int ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCAmelCase: Tuple = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values
__lowerCAmelCase: Dict = tf.constant([[1, 2]] )
__lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
__lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
__lowerCAmelCase: str = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
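        # Shape check: with a 224x224 input and 16x16 patches the image contributes
        # (224 // 16) ** 2 + 1 = 197 tokens, and the 2 text token ids above bring the
        # expected sequence length to 199.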
| 322 | 1 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def _a(from_type: str, to_type: str, value: float) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
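# Illustrative conversions (values follow the ENERGY_CONVERSION table above):
# >>> _a("joule", "kilojoule", 1_000)
# 1.0
# >>> _a("kilowatthour", "joule", 1)
# 3600000.0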
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
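        # Shape check: the model keeps one 768-dim hidden state per input token, and the
        # input above has 11 token ids, which is where the expected (1, 11, 768) comes from.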
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
            # the config is in a subfolder, so the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
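

# --- Added illustrative sketch (not part of the test suite above) ---
# `PretrainedConfig.update_from_string` parses a comma-separated "key=value"
# string and casts each value to the type of the existing attribute, which is
# what the GPT-2 test above exercises. Assumes `transformers` is installed;
# `GPT2Config` is taken to be the class behind the obfuscated `GPTaConfig`.
from transformers import GPT2Config

_c = GPT2Config()
_c.update_from_string(f"n_embd={_c.n_embd + 1},scale_attn_weights={not _c.scale_attn_weights}")
assert _c.n_embd == GPT2Config().n_embd + 1  # the int value was parsed and cast
assert _c.scale_attn_weights != GPT2Config().scale_attn_weights  # the bool was flipped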
| 322 | 1 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_a = '''3'''
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
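# (Added sketch) the same try/except pattern extends to any other framework
# worth reporting on, e.g. JAX:
try:
    import jax
    print('''JAX version:''', jax.__version__)
except ImportError:
    print('''JAX version:''', None)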
| 322 |
_a = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def _a ( SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = 0
while number:
        # Slight speed-up: process five digits at a time via the precomputed lookup table.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Every chain eventually falls into one of two cycles.
# One cycle contains 89; its member 58 is seeded first because that minimises
# the number of iterations needed to classify all remaining values.
# The other cycle is the single fixed point 1.
# So 58 and 1 are the values marked before the search starts.
# A dictionary was replaced by a flat array to speed up lookups.
_a = [None] * 1_0_0_0_0_0_0_0
_a = True
_a = False
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__lowerCAmelCase: int = chain(next_number(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Tuple = number_chain
while number < 10_00_00_00:
__lowerCAmelCase: Dict = number_chain
number *= 10
return number_chain
def _a ( SCREAMING_SNAKE_CASE : int = 10_00_00_00 ) -> int:
"""simple docstring"""
for i in range(1 , SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
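

# --- Added illustrative sketch ---
# Every digit-square chain is assumed to end in one of the two cycles noted
# above: the fixed point 1, or the loop through 89. A self-contained trace,
# independent of the obfuscated helpers above:
def _trace(n: int) -> list[int]:
    seen = [n]
    while seen[-1] not in (1, 89):
        seen.append(sum(int(d) ** 2 for d in str(seen[-1])))
    return seen


assert _trace(44) == [44, 32, 13, 10, 1]
assert _trace(85) == [85, 89]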
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowerCAmelCase: Optional[Any] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowerCAmelCase: Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowerCAmelCase: int = subset[i - 1][j]
if arr[i - 1] <= j:
__lowerCAmelCase: List[Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
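

# --- Added illustrative sketch ---
# An equivalent set-based formulation of the reachability idea behind the DP
# above: keep every sum attainable so far and extend it with each element.
def _is_sum_subset(arr: list[int], required_sum: int) -> bool:
    reachable = {0}
    for value in arr:
        reachable |= {s + value for s in reachable if s + value <= required_sum}
    return required_sum in reachable


assert _is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True  # 4 + 5
assert _is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False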
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
__lowerCAmelCase: str = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
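

# --- Added illustrative sketch ---
# The predicate above accepts exactly the automorphic numbers: n whose square
# ends in the digits of n. An independent string-based check of the first few:
_automorphic = [n for n in range(1000) if str(n * n).endswith(str(n))]
assert _automorphic == [0, 1, 5, 6, 25, 76, 376, 625]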
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer greater than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__lowerCAmelCase: Tuple = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
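

# --- Added illustrative sketch ---
# A self-contained one-liner producing the same output for the first 15 values
# (note each token is followed by a trailing space, as in the loop above):
_out = "".join(
    ("Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0) or str(n)) + " "
    for n in range(1, 16)
)
assert _out == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "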
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
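

# --- Added illustrative note ---
# The overridden generation checks above assert, at decoding step `idx`,
# attention maps of shape
#     (batch_size * num_beam_groups, num_heads, min_length + idx + 1, min_length + idx + 1)
# i.e. square maps over the whole grown sequence, since the expected target and
# source lengths are both min_length + idx + 1. A small helper spelling out
# that expectation (the helper name is illustrative only):
def _expected_attention_shape(batch_size, num_beam_groups, num_heads, min_length, idx):
    seq_len = min_length + idx + 1
    return (batch_size * num_beam_groups, num_heads, seq_len, seq_len)


assert _expected_attention_shape(2, 1, 4, 3, 0) == (2, 4, 4, 4)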
| 322 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
_a = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_a = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _a ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase: Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__lowerCAmelCase: Any = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(SCREAMING_SNAKE_CASE ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(SCREAMING_SNAKE_CASE ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__lowerCAmelCase: Dict = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(SCREAMING_SNAKE_CASE )
return next_generation
def _a ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : int ) -> list[Image.Image]:
"""simple docstring"""
__lowerCAmelCase: int = []
for _ in range(SCREAMING_SNAKE_CASE ):
# Create output image
__lowerCAmelCase: Union[str, Any] = Image.new('RGB' , (len(cells[0] ), len(SCREAMING_SNAKE_CASE )) )
__lowerCAmelCase: List[Any] = img.load()
# Save cells to image
for x in range(len(SCREAMING_SNAKE_CASE ) ):
for y in range(len(cells[0] ) ):
__lowerCAmelCase: List[Any] = 2_55 - cells[y][x] * 2_55
__lowerCAmelCase: Any = (colour, colour, colour)
# Save image
images.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = new_generation(SCREAMING_SNAKE_CASE )
return images
if __name__ == "__main__":
_a = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
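# --- Added sanity sketch ---
# Under the rules above a blinker oscillates with period 2, so two updates
# must return the original grid. A self-contained single-step implementation
# of the same rules (independent of the obfuscated helper above):
def _step(grid: list[list[int]]) -> list[list[int]]:
    h, w = len(grid), len(grid[0])

    def live_neighbours(i: int, j: int) -> int:
        return sum(
            grid[a][b]
            for a in range(max(0, i - 1), min(h, i + 2))
            for b in range(max(0, j - 1), min(w, j + 2))
            if (a, b) != (i, j)
        )

    return [
        [
            1
            if (grid[i][j] and 2 <= live_neighbours(i, j) <= 3)
            or (not grid[i][j] and live_neighbours(i, j) == 3)
            else 0
            for j in range(w)
        ]
        for i in range(h)
    ]


_blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert _step(_step(_blinker)) == _blinker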
| 322 |
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = 0
__lowerCAmelCase: Optional[int] = len(SCREAMING_SNAKE_CASE )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) <= 1:
return arr, 0
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE ) // 2
__lowerCAmelCase: str = arr[0:mid]
__lowerCAmelCase: int = arr[mid:]
__lowerCAmelCase , __lowerCAmelCase: List[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: int = _count_cross_inversions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = []
__lowerCAmelCase: List[str] = 0
while i < len(SCREAMING_SNAKE_CASE ) and j < len(SCREAMING_SNAKE_CASE ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions; the claim follows from p being sorted.
num_inversion += len(SCREAMING_SNAKE_CASE ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: str = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# an empty list should also have zero inversions
__lowerCAmelCase: int = []
__lowerCAmelCase: Any = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
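

# --- Added worked example ---
# The cross-inversion step above: while merging the sorted halves p and q,
# taking q[j] before the remaining p[i:] means each of those len(p) - i
# elements forms an inversion with q[j]. For p = [2, 5] and q = [1, 3]:
# take 1 (+2 inversions), take 2, take 3 (+1 inversion), take 5 -> 3 in total.
def _cross_inversions(p: list[int], q: list[int]) -> int:
    i = j = count = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            count += len(p) - i
            j += 1
        else:
            i += 1
    return count


assert _cross_inversions([2, 5], [1, 3]) == 3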
| 322 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ ( snake_case__ ):
_lowercase : Any = 'new-model'
if is_tf_available():
class A_ ( snake_case__ ):
_lowercase : List[str] = NewModelConfig
@require_tf
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : str ) -> int:
__lowerCAmelCase: List[Any] = 'bert-base-cased'
__lowerCAmelCase: List[str] = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Dict = TFAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
__lowerCAmelCase: List[str] = 'bert-base-cased'
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Dict = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: str = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: str = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Tuple ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: Tuple = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> str:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Any = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: int = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase: List[str] = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : List[str] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase: str = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
@require_tensorflow_probability
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowerCAmelCase: Tuple = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: Any = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> Tuple:
__lowerCAmelCase: int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 1_4_4_1_0 )
def UpperCAmelCase ( self : int ) -> List[str]:
__lowerCAmelCase: int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 1_4_4_1_0 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__lowerCAmelCase: int = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: str = copy.deepcopy(model.config )
__lowerCAmelCase: int = ['FunnelBaseModel']
__lowerCAmelCase: Union[str, Any] = TFAutoModel.from_config(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Any = TFAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
try:
AutoConfig.register('new-model' , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(UpperCAmelCase ):
auto_class.register(UpperCAmelCase , UpperCAmelCase )
auto_class.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
auto_class.register(UpperCAmelCase , UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase: Optional[int] = BertModelTester(self ).get_config()
__lowerCAmelCase: Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
__lowerCAmelCase: int = auto_class.from_config(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: str = auto_class.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCAmelCase ( self : Tuple ) -> Any:
with self.assertRaisesRegex(
UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
__lowerCAmelCase: Tuple = TFAutoModel.from_pretrained('bert-base' )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
with self.assertRaisesRegex(
UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowerCAmelCase: Any = TFAutoModel.from_pretrained(UpperCAmelCase , revision='aaaaaa' )
def UpperCAmelCase ( self : Dict ) -> Any:
with self.assertRaisesRegex(
UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
__lowerCAmelCase: Dict = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
with self.assertRaisesRegex(UpperCAmelCase , 'Use `from_pt=True` to load this model' ):
__lowerCAmelCase: int = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
# Make sure we have cached the model.
__lowerCAmelCase: Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__lowerCAmelCase: Optional[Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowerCAmelCase: str = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
__lowerCAmelCase: Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
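

# --- Added minimal sketch of the register pattern exercised above ---
# A custom config can be hooked into the AutoConfig factory under a new model
# type. `MyConfig` and "my-new-model" below are hypothetical names; assumes
# `transformers` is installed.
from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-new-model"


AutoConfig.register("my-new-model", MyConfig)
assert isinstance(AutoConfig.for_model("my-new-model"), MyConfig)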
| 322 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
        # make sure that round-tripping the config through other scheduler classes
        # gives the same results for the defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
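# Hedged sketch of the config round-trip exercised above: the multistep
# solvers in diffusers share a config schema, so one scheduler can be rebuilt
# from another's config (public class names; assumed import path):
#   from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler
#   base = DPMSolverMultistepScheduler(num_train_timesteps=1000)
#   swapped = UniPCMultistepScheduler.from_config(base.config)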
| 322 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a = logging.get_logger(__name__)
# General docstring
_a = '''RegNetConfig'''
# Base docstring
_a = '''facebook/regnet-y-040'''
_a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_a = '''facebook/regnet-y-040'''
_a = '''tabby, tabby cat'''
_a = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[str] = "relu" , **UpperCAmelCase : Tuple , ) -> List[Any]:
super().__init__(**UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowerCAmelCase: List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__lowerCAmelCase: List[str] = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=UpperCAmelCase , strides=UpperCAmelCase , padding='VALID' , groups=UpperCAmelCase , use_bias=UpperCAmelCase , name='convolution' , )
__lowerCAmelCase: Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
__lowerCAmelCase: str = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self.convolution(self.padding(UpperCAmelCase ) )
__lowerCAmelCase: List[str] = self.normalization(UpperCAmelCase )
__lowerCAmelCase: Tuple = self.activation(UpperCAmelCase )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : List[str] , UpperCAmelCase : RegNetConfig , **UpperCAmelCase : Any ) -> str:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Any = config.num_channels
__lowerCAmelCase: Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : List[str] ) -> List[str]:
__lowerCAmelCase: int = shape_list(UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCAmelCase: str = tf.transpose(UpperCAmelCase , perm=(0, 2, 3, 1) )
__lowerCAmelCase: Tuple = self.embedder(UpperCAmelCase )
return hidden_state
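# Note on the NCHW -> NHWC conversion above: transposing a
# (batch, channels, height, width) tensor with perm=(0, 2, 3, 1) yields
# (batch, height, width, channels), the layout tf.keras.layers.Conv2D
# supports on CPU. Illustrative shapes (assumed values):
#   x = tf.zeros((2, 3, 224, 224))              # NCHW
#   tf.transpose(x, perm=(0, 2, 3, 1)).shape    # (2, 224, 224, 3), NHWC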
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Any , UpperCAmelCase : int , UpperCAmelCase : int = 2 , **UpperCAmelCase : Dict ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=1 , strides=UpperCAmelCase , use_bias=UpperCAmelCase , name='convolution' )
__lowerCAmelCase: Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : tf.Tensor , UpperCAmelCase : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(UpperCAmelCase ) , training=UpperCAmelCase )
class A_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int , **UpperCAmelCase : Any ) -> Tuple:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name='pooler' )
__lowerCAmelCase: Any = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Any:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowerCAmelCase: Optional[int] = self.pooler(UpperCAmelCase )
for layer_module in self.attention:
__lowerCAmelCase: Tuple = layer_module(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = hidden_state * pooled
return hidden_state
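# Sketch of the squeeze-and-excitation gating above (shapes assumed): the
# pooler reduces (batch, h, w, c) to (batch, 1, 1, c); the two 1x1
# convolutions squeeze then re-expand the channel dimension, ending in a
# sigmoid so each channel gets a weight in [0, 1]; the final multiply
# broadcasts those weights over the (h, w) grid to rescale the input.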
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , **UpperCAmelCase : int ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Any = in_channels != out_channels or stride != 1
__lowerCAmelCase: Any = max(1 , out_channels // config.groups_width )
__lowerCAmelCase: int = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCAmelCase: Dict = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name='layer.2' ),
]
__lowerCAmelCase: Union[str, Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Any , UpperCAmelCase : Any ) -> str:
__lowerCAmelCase: Any = hidden_state
for layer_module in self.layers:
__lowerCAmelCase: str = layer_module(UpperCAmelCase )
__lowerCAmelCase: List[str] = self.shortcut(UpperCAmelCase )
hidden_state += residual
__lowerCAmelCase: Tuple = self.activation(UpperCAmelCase )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : str , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , **UpperCAmelCase : Any ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Tuple = in_channels != out_channels or stride != 1
__lowerCAmelCase: Any = max(1 , out_channels // config.groups_width )
__lowerCAmelCase: str = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
__lowerCAmelCase: List[str] = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name='layer.3' ),
]
__lowerCAmelCase: int = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : str , UpperCAmelCase : List[str] ) -> Tuple:
__lowerCAmelCase: List[Any] = hidden_state
for layer_module in self.layers:
__lowerCAmelCase: Dict = layer_module(UpperCAmelCase )
__lowerCAmelCase: int = self.shortcut(UpperCAmelCase )
hidden_state += residual
__lowerCAmelCase: Union[str, Any] = self.activation(UpperCAmelCase )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , UpperCAmelCase : RegNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , **UpperCAmelCase : str ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: int = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__lowerCAmelCase: List[Any] = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , name='layers.0' ),
*[layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
for layer_module in self.layers:
__lowerCAmelCase: Any = layer_module(UpperCAmelCase )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , UpperCAmelCase : RegNetConfig , **UpperCAmelCase : Optional[int] ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
__lowerCAmelCase: int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : tf.Tensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ) -> TFBaseModelOutputWithNoAttention:
__lowerCAmelCase: Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCAmelCase: Optional[Any] = hidden_states + (hidden_state,)
__lowerCAmelCase: str = stage_module(UpperCAmelCase )
if output_hidden_states:
__lowerCAmelCase: List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
@keras_serializable
class A_ ( tf.keras.layers.Layer ):
_lowercase : Dict = RegNetConfig
def __init__( self : Optional[Any] , UpperCAmelCase : int , **UpperCAmelCase : List[Any] ) -> Dict:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = config
__lowerCAmelCase: Union[str, Any] = TFRegNetEmbeddings(UpperCAmelCase , name='embedder' )
__lowerCAmelCase: Tuple = TFRegNetEncoder(UpperCAmelCase , name='encoder' )
__lowerCAmelCase: Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name='pooler' )
@unpack_inputs
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : tf.Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__lowerCAmelCase: Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase: Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase: Dict = self.embedder(UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[Any] = encoder_outputs[0]
__lowerCAmelCase: List[Any] = self.pooler(UpperCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
__lowerCAmelCase: int = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
__lowerCAmelCase: Optional[int] = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCAmelCase: int = tuple([tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A_ ( snake_case__ ):
_lowercase : Optional[int] = RegNetConfig
_lowercase : Any = 'regnet'
_lowercase : Any = 'pixel_values'
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_a = R'''
    This model is a TensorFlow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_a = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , snake_case__ , )
class A_ ( snake_case__ ):
def __init__( self : Any , UpperCAmelCase : RegNetConfig , *UpperCAmelCase : Any , **UpperCAmelCase : Any ) -> Dict:
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: Dict = TFRegNetMainLayer(UpperCAmelCase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : tf.Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[int]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__lowerCAmelCase: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase: List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase: Dict = self.regnet(
pixel_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case__ , )
class A_ ( snake_case__ , snake_case__ ):
def __init__( self : List[str] , UpperCAmelCase : RegNetConfig , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: Dict = config.num_labels
__lowerCAmelCase: Any = TFRegNetMainLayer(UpperCAmelCase , name='regnet' )
# classification head
__lowerCAmelCase: List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : tf.Tensor = None , UpperCAmelCase : tf.Tensor = None , UpperCAmelCase : bool = None , UpperCAmelCase : bool = None , UpperCAmelCase : List[str]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__lowerCAmelCase: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase: Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase: int = self.regnet(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[Any] = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase: Dict = self.classifier[0](UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = self.classifier[1](UpperCAmelCase )
__lowerCAmelCase: Optional[int] = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase , logits=UpperCAmelCase )
if not return_dict:
__lowerCAmelCase: Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
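# Hedged usage sketch for the classification model above (the obfuscated
# classes correspond to the public transformers names used here):
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])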
| 322 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
    """Format a number of seconds as h:mm:ss (or mm:ss when under an hour)."""
    t = int(SCREAMING_SNAKE_CASE )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
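# For example, format_time(75) returns "01:15" and format_time(3675)
# returns "1:01:15" (the hour field is shown only when nonzero).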
def html_progress_bar( value : int , total : int , prefix : str , label : str , width : int=3_00 ) -> str:
    """Return the HTML code for a progress bar with the given label."""
    return f'''
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    '''
def text_to_html_table( items : Union[str, Any] ) -> str:
    """Put the texts in `items` in an HTML table, with the first row as headers."""
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''      <th>{i}</th>\n'''
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
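# Illustrative call: text_to_html_table([["Step", "Loss"], [10, 0.531]])
# renders a two-column table with "Step"/"Loss" headers; float cells are
# formatted to six decimal places ("0.531000"), other cells via str().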
class A_ :
_lowercase : str = 5
_lowercase : str = 0.2
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]:
__lowerCAmelCase: List[str] = total
__lowerCAmelCase: Optional[int] = '' if prefix is None else prefix
__lowerCAmelCase: int = leave
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Optional[Any] = width
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: List[str] = None
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]:
__lowerCAmelCase: int = value
if comment is not None:
__lowerCAmelCase: Any = comment
if self.last_value is None:
__lowerCAmelCase: List[Any] = time.time()
__lowerCAmelCase: Any = value
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = self.warmup
__lowerCAmelCase: List[str] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase: Union[str, Any] = time.time()
__lowerCAmelCase: str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase: int = None
if value >= self.total:
__lowerCAmelCase: Any = self.total
__lowerCAmelCase: str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__lowerCAmelCase: Tuple = value
__lowerCAmelCase: int = current_time
if self.average_time_per_item is None:
__lowerCAmelCase: Optional[int] = 1
else:
__lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
__lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase: Any = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any:
super().__init__(UpperCAmelCase )
__lowerCAmelCase: Tuple = None if column_names is None else [column_names]
__lowerCAmelCase: Union[str, Any] = None
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict:
if self.inner_table is None:
__lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase: Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__lowerCAmelCase: List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase: Tuple = None
self.display()
class A_ ( snake_case__ ):
def __init__( self : Any ) -> List[str]:
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = False
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str:
__lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
__lowerCAmelCase: Optional[int] = 0
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
__lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any:
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase: Any = False
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]:
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase: Any = None
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]:
if self.training_tracker is not None:
__lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase: List[str] = log['loss']
break
if self.first_column == "Epoch":
__lowerCAmelCase: int = int(state.epoch )
else:
__lowerCAmelCase: Tuple = state.global_step
__lowerCAmelCase: Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
__lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , UpperCAmelCase )
__lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase )
__lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase )
__lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase )
__lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase )
__lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__lowerCAmelCase: Tuple = v
else:
__lowerCAmelCase: int = k.split('_' )
__lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase: List[Any] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase: List[str] = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase: str = True
def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = None
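# Hedged usage sketch: in a Jupyter notebook, transformers' Trainer picks this
# callback up automatically when IPython is available; it can also be passed
# explicitly (the class above is NotebookProgressCallback in the public
# transformers.utils.notebook module):
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args,
#                     callbacks=[NotebookProgressCallback()])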
| 322 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class A_ ( unittest.TestCase ):
_lowercase : Optional[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : Optional[Any] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Optional[int] = TextaTextGenerationPipeline(model=UpperCAmelCase , tokenizer=UpperCAmelCase )
return generator, ["Something to write", "Something else"]
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Any ) -> int:
__lowerCAmelCase: Tuple = generator('Something there' )
self.assertEqual(UpperCAmelCase , [{'generated_text': ANY(UpperCAmelCase )}] )
        # These are encoder decoder, they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
__lowerCAmelCase: Any = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{'generated_text': ANY(UpperCAmelCase )}, {'generated_text': ANY(UpperCAmelCase )}],
[{'generated_text': ANY(UpperCAmelCase )}, {'generated_text': ANY(UpperCAmelCase )}],
] , )
__lowerCAmelCase: Dict = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{'generated_text': ANY(UpperCAmelCase )}, {'generated_text': ANY(UpperCAmelCase )}],
[{'generated_text': ANY(UpperCAmelCase )}, {'generated_text': ANY(UpperCAmelCase )}],
] , )
with self.assertRaises(UpperCAmelCase ):
generator(4 )
@require_torch
def UpperCAmelCase ( self : int ) -> Any:
__lowerCAmelCase: str = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
__lowerCAmelCase: Any = generator('Something there' , do_sample=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{'generated_text': ''}] )
__lowerCAmelCase: Any = 3
__lowerCAmelCase: List[str] = generator(
'Something there' , num_return_sequences=UpperCAmelCase , num_beams=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: str = generator('This is a test' , do_sample=UpperCAmelCase , num_return_sequences=2 , return_tensors=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
__lowerCAmelCase: int = generator.model.config.eos_token_id
__lowerCAmelCase: str = '<pad>'
__lowerCAmelCase: Optional[Any] = generator(
['This is a test', 'This is a second test'] , do_sample=UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCAmelCase , )
self.assertEqual(
UpperCAmelCase , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCAmelCase ( self : Any ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
__lowerCAmelCase: Optional[Any] = generator('Something there' , do_sample=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{'generated_text': ''}] )
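# Minimal usage sketch of the pipeline exercised above, with the same tiny
# checkpoint the tests use:
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)  # [{'generated_text': ''}]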
| 322 |
import os
from datetime import datetime as dt
from github import Github
_a = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def _a ( ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: Dict = Github(os.environ['GITHUB_TOKEN'] )
__lowerCAmelCase: Tuple = g.get_repo('huggingface/accelerate' )
__lowerCAmelCase: str = repo.get_issues(state='open' )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
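# Hypothetical invocation (script path assumed; the token needs repo scope):
#   GITHUB_TOKEN=<token> python utils/stale.py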
| 322 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A_ ( snake_case__ ):
_lowercase : Any = 'markuplm'
def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase : Optional[Any]=7_6_8 , UpperCAmelCase : List[Any]=1_2 , UpperCAmelCase : Tuple=1_2 , UpperCAmelCase : List[str]=3_0_7_2 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=2 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Any=1E-12 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : int=0 , UpperCAmelCase : str=2 , UpperCAmelCase : Any=2_5_6 , UpperCAmelCase : List[Any]=1_0_2_4 , UpperCAmelCase : Tuple=2_1_6 , UpperCAmelCase : Any=1_0_0_1 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : Optional[int]=5_0 , UpperCAmelCase : Tuple="absolute" , UpperCAmelCase : str=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Dict , ) -> Tuple:
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase: Dict = vocab_size
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: Any = num_hidden_layers
__lowerCAmelCase: Tuple = num_attention_heads
__lowerCAmelCase: str = hidden_act
__lowerCAmelCase: Tuple = intermediate_size
__lowerCAmelCase: Any = hidden_dropout_prob
__lowerCAmelCase: Tuple = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: Optional[Any] = type_vocab_size
__lowerCAmelCase: Any = initializer_range
__lowerCAmelCase: Optional[Any] = layer_norm_eps
__lowerCAmelCase: Optional[Any] = position_embedding_type
__lowerCAmelCase: List[Any] = use_cache
__lowerCAmelCase: Union[str, Any] = classifier_dropout
# additional properties
__lowerCAmelCase: Tuple = max_depth
__lowerCAmelCase: Union[str, Any] = max_xpath_tag_unit_embeddings
__lowerCAmelCase: Union[str, Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase: List[Any] = tag_pad_id
__lowerCAmelCase: Any = subs_pad_id
__lowerCAmelCase: Any = xpath_unit_hidden_size
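# Hedged usage sketch (public transformers names for the config above):
#   from transformers import MarkupLMConfig, MarkupLMModel
#   config = MarkupLMConfig()      # markuplm-base style defaults
#   model = MarkupLMModel(config)  # randomly initialized weights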
| 322 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
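# The _LazyModule indirection above defers importing the heavy torch-backed
# modules until an attribute is first accessed, so e.g. (hedged sketch)
#   from transformers import Blip2ForConditionalGeneration
# only triggers the real import of the modeling module at that point.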
| 322 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
    def __call__( self : int , step : Dict ) -> Optional[int]:
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
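# Quick numeric check of the warmup ramp above (the class is WarmUp in the
# public transformers source). With a constant decay schedule, power=1.0,
# initial_learning_rate=1e-3 and 100 warmup steps, step 50 yields 5e-4:
#   warmup = WarmUp(initial_learning_rate=1e-3,
#                   decay_schedule_fn=lambda step: tf.constant(1e-3),
#                   warmup_steps=100)
#   float(warmup(50))  # ~0.0005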
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
__lowerCAmelCase: Optional[int] = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
__lowerCAmelCase: List[Any] = AdamWeightDecay(
learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase: Dict = tf.keras.optimizers.Adam(
learning_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
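# Hedged usage sketch of the factory above (create_optimizer in the public
# transformers source):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500,
#       weight_decay_rate=0.01)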
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int:
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[Any] = weight_decay_rate
__lowerCAmelCase: List[str] = include_in_weight_decay
__lowerCAmelCase: Optional[Any] = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
def __init__( self : int ) -> List[Any]:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: int = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
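# Hedged sketch of gradient accumulation with the class above
# (GradientAccumulator in the public transformers source):
#   accumulator = GradientAccumulator()
#   for micro_batch in micro_batches:
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, micro_batch)  # hypothetical helper
#       accumulator(tape.gradient(loss, model.trainable_variables))
#   optimizer.apply_gradients(zip(accumulator.gradients,
#                                 model.trainable_variables))
#   accumulator.reset()  # method name from the public source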
| 322 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_a = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model : Union[str, Any] , pytorch_checkpoint_path : List[str] , is_sharded : Any , allow_missing_keys : bool=False ) -> Union[str, Any]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__lowerCAmelCase: Any = os.path.abspath(SCREAMING_SNAKE_CASE )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
__lowerCAmelCase: Tuple = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
__lowerCAmelCase: int = convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__lowerCAmelCase: Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to the corresponding Flax weight names and reshape the tensor if necessary."""
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__lowerCAmelCase: Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__lowerCAmelCase: Union[str, Any] = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__lowerCAmelCase: Optional[Any] = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__lowerCAmelCase: Tuple = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowerCAmelCase: Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowerCAmelCase: str = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Dict = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowerCAmelCase: List[Any] = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowerCAmelCase: List[Any] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__lowerCAmelCase: str = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__lowerCAmelCase: Any = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__lowerCAmelCase: Dict = pt_tuple_key[-2] + '_v'
if name is not None:
__lowerCAmelCase: Tuple = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
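# Illustrative renamings performed above: a PyTorch linear weight keyed
# ("dense", "weight") with a 2-D tensor becomes the Flax parameter
# ("dense", "kernel") with the tensor transposed, and a 4-D conv weight is
# permuted from (out, in, kh, kw) to (kh, kw, in, out) to match Flax's
# kernel layout.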
def convert_pytorch_state_dict_to_flax( pt_state_dict : List[str] , flax_model : List[str] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: str = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCAmelCase: List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__lowerCAmelCase: Optional[Any] = flax_model.params['params']
else:
__lowerCAmelCase: Optional[int] = flax_model.params
__lowerCAmelCase: Optional[Any] = flatten_dict(SCREAMING_SNAKE_CASE )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCAmelCase: Any = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = {}
__lowerCAmelCase: Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__lowerCAmelCase: List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase: Tuple = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__lowerCAmelCase: str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase: Tuple = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# add model prefix if necessary
__lowerCAmelCase: str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase: Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__lowerCAmelCase: int = jnp.asarray(SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
__lowerCAmelCase: str = jnp.asarray(SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
__lowerCAmelCase: Union[str, Any] = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames : List[Any] , flax_model : Optional[int] ) -> List[Any]:
"""simple docstring"""
import torch
# Load the index
__lowerCAmelCase: Optional[int] = {}
for shard_file in shard_filenames:
# load using msgpack utils
__lowerCAmelCase: Any = torch.load(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCAmelCase: Union[str, Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCAmelCase: Dict = flax_model.params['params']
__lowerCAmelCase: Any = flatten_dict(SCREAMING_SNAKE_CASE )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__lowerCAmelCase: Optional[int] = flax_model.params
__lowerCAmelCase: List[Any] = flatten_dict(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__lowerCAmelCase: Tuple = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase: Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__lowerCAmelCase: List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase: Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase: List[str] = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# add model prefix if necessary
__lowerCAmelCase: str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase: Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__lowerCAmelCase: Optional[Any] = jnp.asarray(SCREAMING_SNAKE_CASE )
continue
if "var" in flax_key[-1]:
__lowerCAmelCase: int = jnp.asarray(SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
__lowerCAmelCase: Optional[int] = jnp.asarray(SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
__lowerCAmelCase: Union[str, Any] = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase: Any = os.path.abspath(SCREAMING_SNAKE_CASE )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
__lowerCAmelCase: Dict = getattr(SCREAMING_SNAKE_CASE , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE , 'rb' ) as state_f:
try:
__lowerCAmelCase: Tuple = from_bytes(SCREAMING_SNAKE_CASE , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__lowerCAmelCase: Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__lowerCAmelCase: List[Any] = jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = flatten_dict(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = pt_model.state_dict()
__lowerCAmelCase: int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__lowerCAmelCase: Dict = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__lowerCAmelCase: List[str] = []
__lowerCAmelCase: Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCAmelCase: str = flax_key_tuple[0] == pt_model.base_model_prefix
__lowerCAmelCase: str = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase: Optional[int] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase: List[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# conv layer
__lowerCAmelCase: str = flax_key_tuple[:-1] + ('weight',)
__lowerCAmelCase: Dict = jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# linear layer
__lowerCAmelCase: Union[str, Any] = flax_key_tuple[:-1] + ('weight',)
__lowerCAmelCase: List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCAmelCase: Any = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__lowerCAmelCase: str = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__lowerCAmelCase: Tuple = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__lowerCAmelCase: Union[str, Any] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__lowerCAmelCase: List[Any] = '.'.join(SCREAMING_SNAKE_CASE )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__lowerCAmelCase: List[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__lowerCAmelCase: Dict = key.split('.' )
__lowerCAmelCase: Optional[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__lowerCAmelCase: List[str] = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__lowerCAmelCase: Any = key_components[-2] + '_v'
if name is not None:
__lowerCAmelCase: List[str] = key_components[:-3] + [name]
__lowerCAmelCase: Tuple = '.'.join(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = key
if flax_key in special_pt_names:
__lowerCAmelCase: Dict = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
__lowerCAmelCase: Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__lowerCAmelCase: Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__lowerCAmelCase: Any = list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
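# --- Illustrative sketch, not part of the original file ---
# A self-contained version of the bfloat16 -> float32 cast performed above
# before handing weights to torch.from_numpy (the toy tree is an assumption):
def _bf16_cast_demo() -> None:
    import jax
    import jax.numpy as jnp

    tree = {'w': jnp.ones((2, 2), dtype=jnp.bfloat16), 'b': jnp.zeros(2)}
    tree = jax.tree_util.tree_map(
        lambda p: p.astype(jnp.float32) if p.dtype == jnp.bfloat16 else p, tree
    )
    assert all(leaf.dtype == jnp.float32 for leaf in jax.tree_util.tree_leaves(tree))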
| 322 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=[] ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2
__lowerCAmelCase: str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
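# Illustrative usage, not part of the original file (sizes are assumptions):
# at this point the mask builder above is still bound to `_a`; calling
# _a((64, 64), 8, remove_borders=['l']) yields a (64, 64) uint8 array that
# ramps 0 -> 255 over 8 pixels on every border except the hard left edge.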
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] ) -> int:
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : [int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCAmelCase: int = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: List[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCAmelCase: List[Any] = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = n % d
return n - divisor
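# Illustrative note, not part of the original file (toy values): the helper
# above rounds n down to the nearest multiple of d via n - (n % d); for
# n = 103 and d = 32 the remainder is 7, so it returns 96.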
class A_ ( snake_case__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 3_5_0 , ) -> Optional[Any]:
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__lowerCAmelCase: Any = image.crop(UpperCAmelCase )
__lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = to_input.size
__lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCAmelCase: int = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
__lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Optional[int] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCAmelCase: int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 7_5 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 5_0 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_2_8 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , ) -> str:
__lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCAmelCase: str = math.ceil(image.size[0] / tile_size )
__lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size )
__lowerCAmelCase: Optional[Any] = tcx * tcy
__lowerCAmelCase: Tuple = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler'
__lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa )
__lowerCAmelCase: Optional[Any] = pipe.to('cuda' )
__lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE : Tuple ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 322 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _a ( SCREAMING_SNAKE_CASE : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def _a ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> XGBClassifier:
"""simple docstring"""
__lowerCAmelCase: int = XGBClassifier()
classifier.fit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return classifier
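# --- Illustrative usage, not part of the original file ---
# The synthetic features/labels below are assumptions; the point is only that
# the fitted classifier returns one class index per input row:
def _xgb_usage_demo() -> None:
    import numpy as np

    x = np.random.rand(20, 4)
    y = np.random.randint(0, 3, size=20)
    preds = XGBClassifier().fit(x, y).predict(x)
    assert preds.shape == (20,)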
def _a ( ) -> None:
"""simple docstring"""
__lowerCAmelCase: Any = load_iris()
__lowerCAmelCase , __lowerCAmelCase: Tuple = data_handling(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = train_test_split(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , test_size=0.2_5 )
__lowerCAmelCase: Optional[Any] = iris['target_names']
# Create an XGBoost Classifier from the training data
__lowerCAmelCase: List[str] = xgboost(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , display_labels=SCREAMING_SNAKE_CASE , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 322 |
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowerCAmelCase: Tuple = True
for i in range(1 , s + 1 ):
__lowerCAmelCase: Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowerCAmelCase: Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
__lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowerCAmelCase: Tuple = s - 2 * j
break
return diff
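# Worked example, not part of the original file: for arr = [1, 6, 11, 5] the
# total is s = 23 and the largest reachable subset sum j <= s // 2 is 11
# (e.g. {6, 5}), so the minimum partition difference returned above is
# 23 - 2 * 11 = 1.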
| 322 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class A_ :
_lowercase : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The column name of the images in the files.'} )
_lowercase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the training data.'} )
_lowercase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the validation data.'} )
_lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_lowercase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowercase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCAmelCase ( self : Any ) -> str:
__lowerCAmelCase: int = {}
if self.train_dir is not None:
__lowerCAmelCase: str = self.train_dir
if self.validation_dir is not None:
__lowerCAmelCase: Dict = self.validation_dir
__lowerCAmelCase: Any = data_files if data_files else None
@dataclass
class A_ :
_lowercase : str = field(
default=snake_case__ , metadata={
'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowercase : str = field(default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
_lowercase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowercase : float = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
_lowercase : bool = field(
default=snake_case__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class A_ ( snake_case__ ):
_lowercase : float = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def _a ( SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase: Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase: Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase: int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__lowerCAmelCase: Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowerCAmelCase: Any = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowerCAmelCase: Optional[int] = ds['train'].train_test_split(data_args.train_val_split )
__lowerCAmelCase: Optional[Any] = split['train']
__lowerCAmelCase: Dict = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase: Any = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowerCAmelCase: Any = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowerCAmelCase: Tuple = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Any = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__lowerCAmelCase: Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__lowerCAmelCase: str = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Any = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__lowerCAmelCase: List[str] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__lowerCAmelCase: Tuple = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
if training_args.do_train:
__lowerCAmelCase: Tuple = ds['train'].column_names
else:
__lowerCAmelCase: Dict = ds['validation'].column_names
if data_args.image_column_name is not None:
__lowerCAmelCase: Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
__lowerCAmelCase: Optional[int] = 'image'
elif "img" in column_names:
__lowerCAmelCase: List[str] = 'img'
else:
__lowerCAmelCase: Optional[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__lowerCAmelCase: Any = image_processor.size['shortest_edge']
else:
__lowerCAmelCase: Optional[int] = (image_processor.size['height'], image_processor.size['width'])
__lowerCAmelCase: int = Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(SCREAMING_SNAKE_CASE : List[str] ):
__lowerCAmelCase: str = [transforms(SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowerCAmelCase: List[str] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowerCAmelCase: Dict = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE )
# Compute absolute learning rate
__lowerCAmelCase: Any = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__lowerCAmelCase: str = training_args.base_learning_rate * total_train_batch_size / 2_56
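    # Illustrative arithmetic with assumed values: base_learning_rate = 1e-3,
    # per-device batch 32, gradient accumulation 2 and world size 4 give an
    # absolute rate of 1e-3 * (32 * 2 * 4) / 256 = 1e-3.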
# Initialize our trainer
__lowerCAmelCase: List[str] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowerCAmelCase: Dict = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase: Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase: Union[str, Any] = last_checkpoint
__lowerCAmelCase: List[Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowerCAmelCase: int = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowerCAmelCase: str = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 322 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__lowerCAmelCase: Tuple = i + 1
else:
__lowerCAmelCase: List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any=1_3 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=False , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[Any]=9_9 , UpperCAmelCase : List[Any]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : Optional[int]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Dict=5_1_2 , UpperCAmelCase : Optional[Any]=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Dict=None , ) -> Any:
__lowerCAmelCase: List[Any] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: List[Any] = is_training
__lowerCAmelCase: Tuple = use_input_mask
__lowerCAmelCase: Optional[int] = use_token_type_ids
__lowerCAmelCase: Optional[Any] = use_labels
__lowerCAmelCase: List[str] = vocab_size
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Any = num_hidden_layers
__lowerCAmelCase: Tuple = num_attention_heads
__lowerCAmelCase: List[Any] = intermediate_size
__lowerCAmelCase: Dict = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase: int = max_position_embeddings
__lowerCAmelCase: Tuple = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: str = num_labels
__lowerCAmelCase: int = num_choices
__lowerCAmelCase: Optional[int] = scope
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: Any = None
if self.use_input_mask:
__lowerCAmelCase: Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = None
__lowerCAmelCase: int = None
if self.use_labels:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=UpperCAmelCase , )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Dict:
__lowerCAmelCase: List[Any] = OpenLlamaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
__lowerCAmelCase: Any = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , ) -> Tuple:
__lowerCAmelCase: List[Any] = True
__lowerCAmelCase: Tuple = OpenLlamaModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Tuple = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , ) -> Dict:
__lowerCAmelCase: List[Any] = OpenLlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Any , ) -> Optional[Any]:
__lowerCAmelCase: List[str] = True
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: Dict = OpenLlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
__lowerCAmelCase: Optional[Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase: List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase: str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase: Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase: List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
__lowerCAmelCase: str = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
# select random slice
__lowerCAmelCase: List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase: Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
__lowerCAmelCase: Any = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Tuple = config_and_inputs
__lowerCAmelCase: Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : Optional[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : Any = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : int = False
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase: List[Any] = OpenLlamaModelTester(self )
__lowerCAmelCase: List[Any] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: List[str] = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: List[str] = 3
__lowerCAmelCase: List[str] = input_dict['input_ids']
__lowerCAmelCase: Optional[int] = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase: Tuple = OpenLlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
__lowerCAmelCase , __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Optional[Any] = 3
__lowerCAmelCase: Optional[int] = 'single_label_classification'
__lowerCAmelCase: Dict = input_dict['input_ids']
__lowerCAmelCase: int = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase: int = OpenLlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: List[str] = 3
__lowerCAmelCase: List[str] = 'multi_label_classification'
__lowerCAmelCase: List[str] = input_dict['input_ids']
__lowerCAmelCase: Dict = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase ( self : Any , UpperCAmelCase : Any ) -> Any:
__lowerCAmelCase , __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Dict = ids_tensor([1, 1_0] , config.vocab_size )
__lowerCAmelCase: List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: Union[str, Any] = OpenLlamaModel(UpperCAmelCase )
original_model.to(UpperCAmelCase )
original_model.eval()
__lowerCAmelCase: Union[str, Any] = original_model(UpperCAmelCase ).last_hidden_state
__lowerCAmelCase: Dict = original_model(UpperCAmelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: Tuple = {'type': scaling_type, 'factor': 10.0}
__lowerCAmelCase: Optional[int] = OpenLlamaModel(UpperCAmelCase )
scaled_model.to(UpperCAmelCase )
scaled_model.eval()
__lowerCAmelCase: Tuple = scaled_model(UpperCAmelCase ).last_hidden_state
__lowerCAmelCase: str = scaled_model(UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-5 ) )
| 322 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a = '''scheduler_config.json'''
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 1
_lowercase : Tuple = 2
_lowercase : Dict = 3
_lowercase : int = 4
_lowercase : Optional[Any] = 5
@dataclass
class A_ ( snake_case__ ):
_lowercase : jnp.ndarray
class A_ :
_lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
_lowercase : Dict = ['dtype']
_lowercase : int = []
_lowercase : Union[str, Any] = True
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
__lowerCAmelCase: Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : str ) -> Dict:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls : Optional[int] ) -> Any:
__lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' )[0] )
__lowerCAmelCase: Dict = [
getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase )
]
return compatible_classes
def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(SCREAMING_SNAKE_CASE : str ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowerCAmelCase: str = []
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class A_ :
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any:
__lowerCAmelCase: str = scheduler.config
if config.trained_betas is not None:
__lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: List[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
__lowerCAmelCase: Optional[Any] = 1.0 - betas
__lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 )
return cls(
alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , )
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = state.alphas_cumprod
__lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5
__lowerCAmelCase: Any = sqrt_alpha_prod.flatten()
__lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
__lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten()
__lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
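# --- Illustrative sketch, not part of the original file ---
# add_noise above computes x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps.
# A numeric check against a toy cumulative-product table (values assumed):
def _add_noise_demo() -> None:
    import jax.numpy as jnp

    alphas_cumprod = jnp.array([0.99, 0.81, 0.49])
    x0 = jnp.ones((2, 3))
    eps = jnp.zeros((2, 3))
    t = jnp.array([2, 2])
    noisy = (
        jnp.sqrt(alphas_cumprod[t])[:, None] * x0
        + jnp.sqrt(1.0 - alphas_cumprod[t])[:, None] * eps
    )
    assert jnp.allclose(noisy, 0.7)  # sqrt(0.49) * 1 + sqrt(0.51) * 0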
| 322 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class A_ ( snake_case__ ):
def __init__( self : Any , UpperCAmelCase : UNetaDModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : List[str] , ) -> Optional[int]:
super().__init__()
__lowerCAmelCase: str = value_function
__lowerCAmelCase: Optional[Any] = unet
__lowerCAmelCase: List[str] = scheduler
__lowerCAmelCase: List[Any] = env
__lowerCAmelCase: Tuple = env.get_dataset()
__lowerCAmelCase: int = {}
for key in self.data.keys():
try:
__lowerCAmelCase: Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
__lowerCAmelCase: Optional[Any] = {}
for key in self.data.keys():
try:
__lowerCAmelCase: int = self.data[key].std()
except: # noqa: E722
pass
__lowerCAmelCase: List[str] = env.observation_space.shape[0]
__lowerCAmelCase: int = env.action_space.shape[0]
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> List[str]:
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Any ) -> List[str]:
if type(UpperCAmelCase ) is dict:
return {k: self.to_torch(UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(UpperCAmelCase , device=self.unet.device )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
for key, val in cond.items():
__lowerCAmelCase: List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ) -> Tuple:
__lowerCAmelCase: Dict = x.shape[0]
__lowerCAmelCase: Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
__lowerCAmelCase: Dict = torch.full((batch_size,) , UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__lowerCAmelCase: Any = self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample
__lowerCAmelCase: List[str] = torch.autograd.grad([y.sum()] , [x] )[0]
__lowerCAmelCase: Tuple = self.scheduler._get_variance(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = torch.exp(0.5 * posterior_variance )
__lowerCAmelCase: Dict = model_std * grad
__lowerCAmelCase: List[str] = 0
__lowerCAmelCase: Union[str, Any] = x.detach()
__lowerCAmelCase: Any = x + scale * grad
__lowerCAmelCase: Tuple = self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
__lowerCAmelCase: Any = self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
__lowerCAmelCase: List[Any] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , predict_epsilon=UpperCAmelCase )['prev_sample']
# apply conditions to the trajectory (set the initial state)
__lowerCAmelCase: Dict = self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
__lowerCAmelCase: Union[str, Any] = self.to_torch(UpperCAmelCase )
return x, y
def __call__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str=6_4 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : Any=2 , UpperCAmelCase : Optional[int]=0.1 ) -> Optional[int]:
# normalize the observations and create batch dimension
__lowerCAmelCase: str = self.normalize(UpperCAmelCase , 'observations' )
__lowerCAmelCase: int = obs[None].repeat(UpperCAmelCase , axis=0 )
__lowerCAmelCase: Any = {0: self.to_torch(UpperCAmelCase )}
__lowerCAmelCase: Any = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
__lowerCAmelCase: Union[str, Any] = randn_tensor(UpperCAmelCase , device=self.unet.device )
__lowerCAmelCase: List[str] = self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
__lowerCAmelCase: List[Any] = self.to_torch(UpperCAmelCase )
# run the diffusion process
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.run_diffusion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# sort output trajectories by value
__lowerCAmelCase: Optional[int] = y.argsort(0 , descending=UpperCAmelCase ).squeeze()
__lowerCAmelCase: Dict = x[sorted_idx]
__lowerCAmelCase: Union[str, Any] = sorted_values[:, :, : self.action_dim]
__lowerCAmelCase: Optional[int] = actions.detach().cpu().numpy()
__lowerCAmelCase: int = self.de_normalize(UpperCAmelCase , key='actions' )
# select the action with the highest value
if y is not None:
__lowerCAmelCase: Dict = 0
else:
# if we didn't run value guiding, select a random action
__lowerCAmelCase: Optional[Any] = np.random.randint(0 , UpperCAmelCase )
__lowerCAmelCase: List[Any] = denorm_actions[selected_index, 0]
return denorm_actions
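# Added usage sketch (commented out: it needs gym + d4rl and a trained checkpoint;
# the repo id and import path are assumptions based on the upstream pipeline):
#
# import gym
# from diffusers.experimental import ValueGuidedRLPipeline
#
# env = gym.make("hopper-medium-v2")
# pipeline = ValueGuidedRLPipeline.from_pretrained(
#     "bglick13/hopper-medium-v2-value-function-hor32", env=env
# )
# obs = env.reset()
# action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)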
| 322 |
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
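# Added performance sketch: queue.pop(0) above costs O(n) per pop; collections.deque
# makes it O(1). An equivalent BFS path search using a deque (illustrative rewrite,
# same traversal order and results as bfs_shortest_path):
from collections import deque

def bfs_shortest_path_deque(graph: dict, start: str, goal: str) -> list[str]:
    if start == goal:
        return [start]
    explored = set()
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node in explored:
            continue
        for neighbour in graph[node]:
            new_path = [*path, neighbour]
            if neighbour == goal:
                return new_path
            queue.append(new_path)
        explored.add(node)
    return []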
| 322 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Tuple = 1_0
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase: Optional[Any] = [1, 2, 3, 4]
__lowerCAmelCase: int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase , self.block_size , 0 ) , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
__lowerCAmelCase: Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
__lowerCAmelCase: int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(UpperCAmelCase , self.block_size , 0 ) , UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
__lowerCAmelCase: Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(UpperCAmelCase , self.block_size , 0 ) , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase: Dict = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
__lowerCAmelCase , __lowerCAmelCase: Any = process_story(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [] )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: Optional[int] = ''
__lowerCAmelCase , __lowerCAmelCase: int = process_story(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [] )
self.assertEqual(UpperCAmelCase , [] )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = process_story(UpperCAmelCase )
__lowerCAmelCase: List[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = ['It was the best of times.']
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__lowerCAmelCase: int = torch.tensor([1, 2, 3, 4] )
__lowerCAmelCase: Optional[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self : str ) -> Optional[int]:
__lowerCAmelCase: str = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
__lowerCAmelCase: str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase , 2_3 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: Union[str, Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowerCAmelCase: str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase ( self : int ) -> Tuple:
__lowerCAmelCase: Optional[int] = 1_0_1
__lowerCAmelCase: List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
__lowerCAmelCase: List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowerCAmelCase: Optional[int] = compute_token_type_ids(UpperCAmelCase , UpperCAmelCase )
np.testing.assert_array_equal(UpperCAmelCase , UpperCAmelCase )
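# Added stand-in consistent with the build_mask tests above (mask is 1 wherever the
# token differs from the pad id). The real helper in utils_summarization may treat
# interior pad tokens differently, so this is only an illustration:
def build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    return (sequence != pad_token_id).long()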
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( snake_case__ ):
_lowercase : int = ['image_processor', 'tokenizer']
_lowercase : Union[str, Any] = 'LayoutLMv3ImageProcessor'
_lowercase : List[str] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[Any] ) -> str:
__lowerCAmelCase: str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase , )
__lowerCAmelCase: List[Any] = kwargs.pop('feature_extractor' )
__lowerCAmelCase: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
__lowerCAmelCase: str = self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCAmelCase: List[str] = features['words']
__lowerCAmelCase: List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
__lowerCAmelCase: Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowerCAmelCase: int = self.get_overflowing_images(UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowerCAmelCase: str = images
return encoded_inputs
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowerCAmelCase: str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' )
return images_with_overflow
def UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> List[str]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , )
return self.image_processor
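# Added usage sketch (commented out: it downloads a checkpoint and, with the default
# apply_ocr=True, requires pytesseract):
#
# from PIL import Image
# from transformers import LayoutLMv3Processor
#
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")
# # encoding holds the model_input_names listed above:
# # input_ids, bbox, attention_mask, pixel_values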
| 322 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_a = TypeVar('''_T''')
class A_ ( Generic[_T] ):
def __init__( self : Dict , UpperCAmelCase : Iterable[_T] | None = None ) -> None:
__lowerCAmelCase: list[_T] = list(iterable or [] )
__lowerCAmelCase: list[_T] = []
def __len__( self : Optional[int] ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Dict ) -> str:
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : _T ) -> None:
self._stacka.append(UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] ) -> _T:
__lowerCAmelCase: Optional[int] = self._stacka.pop
__lowerCAmelCase: Optional[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
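# Added reference sketch with descriptive names (the identifier normalization above
# collapsed the two stack attributes into one name; this is an equivalent, runnable
# two-stack FIFO queue):
class TwoStackQueue(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._inbox: list[_T] = list(iterable or [])  # receives enqueued items
        self._outbox: list[_T] = []  # serves dequeues in FIFO order
    def __len__(self) -> int:
        return len(self._inbox) + len(self._outbox)
    def put(self, item: _T) -> None:
        self._inbox.append(item)
    def get(self) -> _T:
        if not self._outbox:
            # reverse the inbox once; each element moves at most twice -> amortized O(1)
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError('Queue is empty')
        return self._outbox.pop()

# q = TwoStackQueue([1, 2]); q.put(3); q.get()  # -> 1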
| 322 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_a = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : tuple , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=False , ) -> str:
"""simple docstring"""
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
else:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowerCAmelCase: str = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__lowerCAmelCase: Dict = 'cpu'
__lowerCAmelCase: Optional[int] = Path(SCREAMING_SNAKE_CASE )
# VAE DECODER
__lowerCAmelCase: Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' )
__lowerCAmelCase: Union[str, Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowerCAmelCase: Any = vae_decoder.decode
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
_a = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
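# Added invocation sketch (the script name and local paths are placeholders):
#
# python convert_vae_decoder_to_onnx.py \
#     --model_path ./stable-diffusion-v1-5 \
#     --output_path ./sd_onnx \
#     --opset 14 \
#     --fp16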
| 322 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_a = '''
Human: <<task>>
Assistant: '''
_a = '''huggingface-tools/default-prompts'''
_a = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any="run" ) -> List[Any]:
"""simple docstring"""
if prompt_or_repo_id is None:
__lowerCAmelCase: Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , SCREAMING_SNAKE_CASE ) is not None:
return prompt_or_repo_id
__lowerCAmelCase: Optional[Any] = cached_file(
SCREAMING_SNAKE_CASE , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
return f.read()
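# Added usage note: when the argument contains whitespace it is returned verbatim as
# the prompt; otherwise it is treated as a Hub dataset repo id (defaulting to
# huggingface-tools/default-prompts). Sketch assuming the upstream name of this
# function, download_prompt:
#
# template = download_prompt(None, "MyAgent", mode="run")  # fetches the default run prompt
# literal = download_prompt("Human: <<task>>\nAssistant:", "MyAgent")  # returned as-is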
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 )
__lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] )
__lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
return sub_problem_sol
else:
return 0
__lowerCAmelCase: List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: int = 1 + min([right, diagonal, down] )
__lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase: int = [0]
__lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase: Optional[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1]
__lowerCAmelCase: str = dp_array[row + 1][col + 1]
__lowerCAmelCase: Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = 0
return largest_square_area
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: Tuple = [0] * (cols + 1)
__lowerCAmelCase: Optional[int] = [0] * (cols + 1)
__lowerCAmelCase: str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: int = current_row[col + 1]
__lowerCAmelCase: Union[str, Any] = next_row[col + 1]
__lowerCAmelCase: Any = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
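# Added reference sketch of the same bottom-up recurrence with descriptive names,
# where dp[r][c] is the side length of the largest all-ones square whose top-left
# corner sits at (r, c):
def largest_square_side(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c] == 1:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best = max(best, dp[r][c])
    return best

# largest_square_side(2, 2, [[1, 1], [1, 1]]) == 2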
| 322 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_a = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCAmelCase: Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__lowerCAmelCase: Optional[Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__lowerCAmelCase: Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__lowerCAmelCase: List[str] = value
elif weight_type == "weight_g":
__lowerCAmelCase: Any = value
elif weight_type == "weight_v":
__lowerCAmelCase: List[str] = value
elif weight_type == "bias":
__lowerCAmelCase: str = value
else:
__lowerCAmelCase: Union[str, Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: str = fairseq_model.state_dict()
__lowerCAmelCase: Optional[Any] = hf_model.feature_extractor
__lowerCAmelCase: Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
__lowerCAmelCase: int = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase: str = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase: Optional[int] = True
if "*" in mapped_key:
__lowerCAmelCase: Optional[Any] = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__lowerCAmelCase: List[Any] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__lowerCAmelCase: str = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase: Union[str, Any] = 'weight_v'
elif "bias" in name:
__lowerCAmelCase: Dict = 'bias'
elif "weight" in name:
__lowerCAmelCase: Tuple = 'weight'
else:
__lowerCAmelCase: Union[str, Any] = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase: int = name.split('.' )
__lowerCAmelCase: int = int(items[0] )
__lowerCAmelCase: int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__lowerCAmelCase: Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__lowerCAmelCase: List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__lowerCAmelCase: str = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__lowerCAmelCase: Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase: Any = full_name.split('adaptor.' )[-1]
__lowerCAmelCase: Optional[int] = name.split('.' )
if items[1].isdigit():
__lowerCAmelCase: List[Any] = int(items[1] )
else:
__lowerCAmelCase: Optional[int] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
__lowerCAmelCase: List[Any] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
__lowerCAmelCase: Tuple = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
__lowerCAmelCase: Optional[Any] = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
__lowerCAmelCase: Optional[int] = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
__lowerCAmelCase: Optional[Any] = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
__lowerCAmelCase: List[str] = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = emb.weight.shape
__lowerCAmelCase: Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = emb.weight.data
return lin_layer
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , add_adapter=SCREAMING_SNAKE_CASE , adapter_stride=SCREAMING_SNAKE_CASE , adapter_kernel_size=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , output_hidden_size=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Optional[int] = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE )
# load model
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
__lowerCAmelCase: Any = model[0].eval()
# load feature extractor
__lowerCAmelCase: List[str] = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
__lowerCAmelCase: str = WavaVecaModel(SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
# load decoder weights
__lowerCAmelCase: Union[str, Any] = MBartForCausalLM(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__lowerCAmelCase: str = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = False
__lowerCAmelCase: Optional[Any] = MBartaaTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = hf_wavavec.config.to_dict()
__lowerCAmelCase: Dict = tokenizer.pad_token_id
__lowerCAmelCase: int = tokenizer.bos_token_id
__lowerCAmelCase: List[str] = tokenizer.eos_token_id
__lowerCAmelCase: str = 'mbart50'
__lowerCAmelCase: Any = 'wav2vec2'
__lowerCAmelCase: Any = tokenizer.eos_token_id
__lowerCAmelCase: Dict = 25_00_04
__lowerCAmelCase: int = tokenizer.eos_token_id
__lowerCAmelCase: int = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
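# Added invocation sketch (the script name and local paths are placeholders; the two
# config repo ids are the argparse defaults above):
#
# python convert_wav2vec2_mbart50.py \
#     --checkpoint_path ./xls_r_mbart.pt \
#     --dict_path ./dict.txt \
#     --config_yaml_path ./config.yaml \
#     --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50 \
#     --encoder_config_path facebook/wav2vec2-xls-r-1b \
#     --decoder_config_path facebook/mbart-large-50-one-to-many-mmt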
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = SavedModel()
__lowerCAmelCase: str = []
with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase: Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE )
if strict and len(SCREAMING_SNAKE_CASE ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops) )
elif len(SCREAMING_SNAKE_CASE ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
_a = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
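# Added invocation sketch (the saved-model path is a placeholder):
#
# python check_tf_ops.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict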
| 322 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class A_ ( snake_case__ ):
_lowercase : str = 'biogpt'
def __init__( self : str , UpperCAmelCase : List[str]=4_2_3_8_4 , UpperCAmelCase : Any=1_0_2_4 , UpperCAmelCase : Union[str, Any]=2_4 , UpperCAmelCase : List[str]=1_6 , UpperCAmelCase : List[Any]=4_0_9_6 , UpperCAmelCase : int="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=1_0_2_4 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : Optional[int]=1E-12 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Any=1 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : Optional[int]=2 , **UpperCAmelCase : Any , ) -> Dict:
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Any = max_position_embeddings
__lowerCAmelCase: Dict = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: str = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Dict = hidden_act
__lowerCAmelCase: Tuple = hidden_dropout_prob
__lowerCAmelCase: Tuple = attention_probs_dropout_prob
__lowerCAmelCase: Optional[Any] = initializer_range
__lowerCAmelCase: Dict = layer_norm_eps
__lowerCAmelCase: Dict = scale_embedding
__lowerCAmelCase: Tuple = use_cache
__lowerCAmelCase: Optional[Any] = layerdrop
__lowerCAmelCase: Union[str, Any] = activation_dropout
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
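# Added usage sketch (uses the upstream class name, BioGptConfig, which the
# normalized definition above corresponds to):
#
# from transformers import BioGptConfig, BioGptModel
# config = BioGptConfig(num_hidden_layers=12)  # remaining fields keep the defaults above
# model = BioGptModel(config)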
| 322 |
import math
import qiskit
def _a ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
__lowerCAmelCase: Union[str, Any] = qiskit.QuantumRegister(4 , 'qr' )
__lowerCAmelCase: List[Any] = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
__lowerCAmelCase: Any = [input_a, input_a, carry_in]
__lowerCAmelCase: List[str] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits
__lowerCAmelCase: List[str] = qiskit.Aer.get_backend('aer_simulator' )
__lowerCAmelCase: List[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
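# Added note: the two measured classical bits read back as '<carry_out><sum>'. With
# definite (0/1) inputs, the 1000-shot distribution collapses to one outcome, e.g.:
#
# quantum_full_adder(1, 1, 1)  # {'11': 1000} -> sum = 1, carry_out = 1
# quantum_full_adder(1, 0, 0)  # {'01': 1000} -> sum = 1, carry_out = 0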
| 322 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase: str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[Any] = bbox[i, j, 3]
__lowerCAmelCase: Tuple = bbox[i, j, 1]
__lowerCAmelCase: Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Any = bbox[i, j, 2]
__lowerCAmelCase: int = bbox[i, j, 0]
__lowerCAmelCase: int = tmp_coordinate
__lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase )
__lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase: int = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase: str = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
__lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
__lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
__lowerCAmelCase: Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any:
__lowerCAmelCase: str = 2
__lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs
__lowerCAmelCase: List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : Tuple = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : Tuple = False
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]:
return True
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict:
__lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
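            # For multiple-choice heads (assumed to be what `get_values` selects
            # here), tile every batched tensor along a new choice axis: e.g.
            # input_ids of shape (batch, seq) becomes (batch, num_choices, seq);
            # scalars and non-tensor values pass through unchanged.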
__lowerCAmelCase: int = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self )
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
                # Test that the model correctly computes the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that the model correctly computes the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self : Dict ) -> Tuple:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> List[str]:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[str]:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def _a ( ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : int ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCAmelCase: Tuple = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values
__lowerCAmelCase: Dict = tf.constant([[1, 2]] )
__lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
__lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
__lowerCAmelCase: str = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
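# Illustrative sketch (hedged, not executed as part of the suite): the
# integration test above reduces to a forward pass like the following,
# assuming the `microsoft/layoutlmv3-base` checkpoint and the image processor
# used above are available.
#
#   model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   pixel_values = processor(images=prepare_img(), return_tensors='tf').pixel_values
#   outputs = model(input_ids=tf.constant([[1, 2]]), bbox=bbox, pixel_values=pixel_values)
#   # outputs.last_hidden_state.shape -> (1, 199, 768): the 2 text tokens
#   # plus the visual tokens contributed by the image patches.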
| 322 | 1 |
from __future__ import annotations
import queue
class A_ :
def __init__( self : List[str] , UpperCAmelCase : Dict ) -> int:
__lowerCAmelCase: Any = data
__lowerCAmelCase: Any = None
__lowerCAmelCase: Dict = None
def _a ( ) -> TreeNode:
"""simple docstring"""
    print('\n********Press N to stop entering at any time********\n' )
__lowerCAmelCase: Dict = input('Enter the value of the root node: ' ).strip().lower()
__lowerCAmelCase: queue.Queue = queue.Queue()
__lowerCAmelCase: int = TreeNode(int(SCREAMING_SNAKE_CASE ) )
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
__lowerCAmelCase: str = q.get()
__lowerCAmelCase: str = f'''Enter the left node of {node_found.data}: '''
__lowerCAmelCase: int = input(SCREAMING_SNAKE_CASE ).strip().lower() or 'n'
if check == "n":
return tree_node
__lowerCAmelCase: Optional[Any] = TreeNode(int(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: str = left_node
q.put(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = f'''Enter the right node of {node_found.data}: '''
__lowerCAmelCase: Optional[Any] = input(SCREAMING_SNAKE_CASE ).strip().lower() or 'n'
if check == "n":
return tree_node
__lowerCAmelCase: str = TreeNode(int(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Union[str, Any] = right_node
q.put(SCREAMING_SNAKE_CASE )
    raise  # for type checkers: the loop above always returns once input stops
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
__lowerCAmelCase: queue.Queue = queue.Queue()
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
__lowerCAmelCase: Optional[int] = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
__lowerCAmelCase: queue.Queue = queue.Queue()
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
__lowerCAmelCase: Optional[Any] = []
while not q.empty():
__lowerCAmelCase: Dict = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
__lowerCAmelCase: list[TreeNode] = []
__lowerCAmelCase: Any = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = n.left
# end of while means current node doesn't have left child
__lowerCAmelCase: List[Any] = stack.pop()
# start to traverse its right child
__lowerCAmelCase: List[Any] = n.right
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
__lowerCAmelCase: list[TreeNode] = []
__lowerCAmelCase: List[str] = node
while n or stack:
while n:
stack.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = n.left
__lowerCAmelCase: Union[str, Any] = stack.pop()
print(n.data , end=',' )
__lowerCAmelCase: List[str] = n.right
def _a ( SCREAMING_SNAKE_CASE : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = [], []
__lowerCAmelCase: Optional[int] = node
stacka.append(SCREAMING_SNAKE_CASE )
    while stacka:  # store the reverse of the post-order sequence in stack2
__lowerCAmelCase: List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(SCREAMING_SNAKE_CASE )
    while stacka:  # popping from stack2 yields the post-order sequence
print(stacka.pop().data , end=',' )
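# The second stack receives nodes in root-right-left order, so popping it
# emits left-right-root, i.e. the post-order sequence.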
def _a ( SCREAMING_SNAKE_CASE : str = "" , SCREAMING_SNAKE_CASE : str=50 , SCREAMING_SNAKE_CASE : Union[str, Any]="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
__lowerCAmelCase , __lowerCAmelCase: List[str] = divmod(width - len(SCREAMING_SNAKE_CASE ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
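# Worked example (illustrative): for the tree
#
#         1
#        / \
#       2   3
#      / \
#     4   5
#
# the traversals above print:
#   pre order   -> 1,2,4,5,3,
#   in order    -> 4,2,5,1,3,
#   post order  -> 4,5,2,3,1,
#   level order -> 1,2,3,4,5,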
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
_a = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
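# Illustrative sketch (hedged) mirroring the integration test above, assuming
# the public `albert-base-v2` checkpoint is reachable:
#
#   model = FlaxAlbertModel.from_pretrained('albert-base-v2')
#   output = model(input_ids, attention_mask=attention_mask)[0]
#   output.shape  # -> (1, 11, 768) for the 11-token example above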
| 322 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__lowerCAmelCase: Tuple = i + 1
else:
__lowerCAmelCase: List[str] = j - 1
return []
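# Illustrative trace for two_pointer([2, 7, 11, 15], 9):
#   2 + 15 = 17 > 9  -> move j left
#   2 + 11 = 13 > 9  -> move j left
#   2 + 7  = 9       -> return [0, 1]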
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
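# Every value above deliberately differs from the corresponding
# `PretrainedConfig` default, so the tests below can detect attributes that
# silently fail to round-trip through serialization.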
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that the fake head request was indeed called
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
        # Test an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
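# Minimal illustrative round trip (a sketch; no Hub access needed):
#
#   config = BertConfig(hidden_size=32)
#   with tempfile.TemporaryDirectory() as tmp_dir:
#       config.save_pretrained(tmp_dir)
#       assert BertConfig.from_pretrained(tmp_dir).hidden_size == 32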
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = 0
__lowerCAmelCase: Optional[int] = len(SCREAMING_SNAKE_CASE )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) <= 1:
return arr, 0
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE ) // 2
__lowerCAmelCase: str = arr[0:mid]
__lowerCAmelCase: int = arr[mid:]
__lowerCAmelCase , __lowerCAmelCase: List[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: int = _count_cross_inversions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = []
__lowerCAmelCase: List[str] = 0
while i < len(SCREAMING_SNAKE_CASE ) and j < len(SCREAMING_SNAKE_CASE ):
if p[i] > q[j]:
            # If p[i] > q[j], then p[k] > q[j] for every k in [i, len(p)),
            # because p is sorted; each such pair is a cross inversion.
num_inversion += len(SCREAMING_SNAKE_CASE ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
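# Illustrative cross-inversion count: with p = [2, 5] and q = [1, 3] (both
# sorted), the pairs (2, 1), (5, 1) and (5, 3) are inversions, so the helper
# returns ([1, 2, 3, 5], 3).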
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: str = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# an empty list should also have zero inversions
__lowerCAmelCase: int = []
__lowerCAmelCase: Any = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 322 |
_a = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def _a ( SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = 0
while number:
        # Speed is improved slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
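# Worked example: next_number(44) = 16 + 16 = 32, then 32 -> 13 -> 10 -> 1,
# while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops at 89.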
# Every chain eventually reaches either 89 or 1.
# One chain ends at 89; declaring its member 58 first keeps the number of
# iterations needed to check the remaining members to a minimum.
# The other chain ends at 1 and has only the single element 1.
# So 58 and 1 are seeded at the start.
# The cache was changed from a dictionary to an array to speed up the solution.
_a = [None] * 1_0_0_0_0_0_0_0
_a = True
_a = False
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__lowerCAmelCase: int = chain(next_number(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Tuple = number_chain
while number < 10_00_00_00:
__lowerCAmelCase: Dict = number_chain
number *= 10
return number_chain
def _a ( SCREAMING_SNAKE_CASE : int = 10_00_00_00 ) -> int:
"""simple docstring"""
for i in range(1 , SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 322 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a = logging.get_logger(__name__)
_a = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class A_ ( snake_case__ , snake_case__ ):
_lowercase : Any = 'resnet'
_lowercase : List[Any] = ['basic', 'bottleneck']
def __init__( self : Dict , UpperCAmelCase : Dict=3 , UpperCAmelCase : Any=6_4 , UpperCAmelCase : List[Any]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCAmelCase : int=[3, 4, 6, 3] , UpperCAmelCase : Union[str, Any]="bottleneck" , UpperCAmelCase : Union[str, Any]="relu" , UpperCAmelCase : int=False , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : Tuple , ) -> str:
super().__init__(**UpperCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
__lowerCAmelCase: Tuple = num_channels
__lowerCAmelCase: Any = embedding_size
__lowerCAmelCase: Optional[int] = hidden_sizes
__lowerCAmelCase: Optional[int] = depths
__lowerCAmelCase: str = layer_type
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: Optional[int] = downsample_in_first_stage
__lowerCAmelCase: Dict = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names )
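# Illustrative usage (a sketch, assuming the class above mirrors transformers'
# ResNetConfig; it is renamed here):
#
#   config = ResNetConfig(depths=[2, 2], hidden_sizes=[256, 512], layer_type="basic")
#   config.stage_names  # -> ['stem', 'stage1', 'stage2']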
class A_ ( snake_case__ ):
_lowercase : Tuple = version.parse('1.11' )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase ( self : int ) -> float:
return 1E-3
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
__lowerCAmelCase: str = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
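# The check above accepts automorphic numbers, i.e. numbers whose square ends
# in the number itself: 5 * 5 = 25, 6 * 6 = 36, 25 * 25 = 625 and
# 76 * 76 = 5776 all return True, while 7 * 7 = 49 fails on the last digit.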
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/test_sentencepiece.model''')
_a = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
_a = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = CamembertTokenizer
_lowercase : Any = CamembertTokenizerFast
_lowercase : Any = True
_lowercase : Optional[Any] = True
def UpperCAmelCase ( self : List[Any] ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase: int = CamembertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: List[Any] = '<pad>'
__lowerCAmelCase: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCAmelCase ) , 1_0_0_4 )
def UpperCAmelCase ( self : int ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def UpperCAmelCase ( self : int ) -> Tuple:
__lowerCAmelCase: Any = CamembertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
__lowerCAmelCase: Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowerCAmelCase: Optional[int] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase: int = tokenizer.encode(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: int = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
__lowerCAmelCase: Dict = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowerCAmelCase: Dict = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
__lowerCAmelCase: Union[str, Any] = self.get_tokenizer()
__lowerCAmelCase: str = self.get_rust_tokenizer()
__lowerCAmelCase: Union[str, Any] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase: str = tokenizer.tokenize(UpperCAmelCase )
__lowerCAmelCase: Any = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[str] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[str] = self.get_rust_tokenizer()
__lowerCAmelCase: List[str] = tokenizer.encode(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# fmt: off
__lowerCAmelCase: int = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
__lowerCAmelCase: Tuple = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=UpperCAmelCase , )
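# Illustrative usage outside the test harness (hedged; assumes the public
# `camembert-base` checkpoint is reachable):
#
#   tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
#   ids = tokenizer.encode("J'aime le camembert !")
#   tokenizer.decode(ids)  # round-trips the sentence plus special tokens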
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
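        # Each (batch, seq) tensor above is tiled to (batch, num_choices, seq)
        # so the same context is scored once per candidate choice.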
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = config_and_inputs
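        # XLM can take a `lengths` tensor (one entry per sequence) instead of an attention mask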
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
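                # XLMForQuestionAnswering expects start/end position labels; all-zero dummy
                # tensors are enough for the common tests, which only check output shapes
                # and that the loss backpropagates.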
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
| 322 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ) -> Tuple:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase ) for s in shape] )}.npy'''
def UpperCAmelCase ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict=0 , UpperCAmelCase : str=(4, 4, 6_4, 6_4) , UpperCAmelCase : List[str]=False ) -> int:
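        # load a deterministic latent tensor from a precomputed .npy fixture
        # (bfloat16 when the fp16 flag is set, float32 otherwise)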
__lowerCAmelCase: List[Any] = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase: Any = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase )
return image
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : str="CompVis/stable-diffusion-v1-4" ) -> Dict:
__lowerCAmelCase: str = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase: Union[str, Any] = 'bf16' if fpaa else None
__lowerCAmelCase , __lowerCAmelCase: int = FlaxUNetaDConditionModel.from_pretrained(
UpperCAmelCase , subfolder='unet' , dtype=UpperCAmelCase , revision=UpperCAmelCase )
return model, params
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : List[Any]=(4, 7_7, 7_6_8) , UpperCAmelCase : Any=False ) -> List[str]:
__lowerCAmelCase: Dict = jnp.bfloataa if fpaa else jnp.floataa
__lowerCAmelCase: List[str] = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
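    # each case is (seed, timestep, expected output slice); the expected slices were
    # precomputed against the equivalent PyTorch UNet (see the tolerance note below)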
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Any:
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=UpperCAmelCase )
__lowerCAmelCase: str = self.get_latents(UpperCAmelCase , fpaa=UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.get_encoder_hidden_states(UpperCAmelCase , fpaa=UpperCAmelCase )
__lowerCAmelCase: List[str] = model.apply(
{'params': params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample
assert sample.shape == latents.shape
__lowerCAmelCase: Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__lowerCAmelCase: List[Any] = jnp.array(UpperCAmelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: List[Any] = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=UpperCAmelCase )
__lowerCAmelCase: Any = self.get_latents(UpperCAmelCase , shape=(4, 4, 9_6, 9_6) , fpaa=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = self.get_encoder_hidden_states(UpperCAmelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=UpperCAmelCase )
__lowerCAmelCase: str = model.apply(
{'params': params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample
assert sample.shape == latents.shape
__lowerCAmelCase: str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__lowerCAmelCase: str = jnp.array(UpperCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
| 322 |
def count_inversions_bf(arr):
    """Count inversions by brute force: check every pair, O(n^2) time."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions by divide and conquer, O(n log n) time.

    Returns the sorted array together with its number of inversions.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge the sorted arrays p and q, counting inversions across them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 322 | 1 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined Harris free parameter, typically 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        # products of derivatives for the structure tensor M = [[Ixx, Ixy], [Ixy, Iyy]]
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured free parameter (the original hard-coded 0.04 here)
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                # Harris corner response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 322 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
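        # the residuals above stand in for previous model outputs kept by the solver's history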
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
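            # classic sampling loop: predict the residual with the model, then let the
            # scheduler step the sample to the previous (less noisy) timestep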
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
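

if __name__ == "__main__":
    # Minimal usage sketch, not part of the test suite: the scheduler under test is a
    # drop-in component for diffusers pipelines via `from_config`. The model id below
    # is an assumption chosen only for illustration.
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
    print(type(pipe.scheduler).__name__)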
| 322 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def _a ( SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=SCREAMING_SNAKE_CASE , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=SCREAMING_SNAKE_CASE , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=SCREAMING_SNAKE_CASE , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=SCREAMING_SNAKE_CASE , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def _a ( SCREAMING_SNAKE_CASE : str ) -> List[Any]:
"""simple docstring"""
if args.calibrator == "max":
__lowerCAmelCase: List[str] = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
__lowerCAmelCase: int = 'histogram'
elif args.calibrator == "mse":
__lowerCAmelCase: Dict = 'histogram'
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
__lowerCAmelCase: int = QuantDescriptor(num_bits=args.aprec , calib_method=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(SCREAMING_SNAKE_CASE )
quant_nn.QuantLinear.set_default_quant_desc_weight(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
logger.info('Configuring Model for Quantization' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , ['embeddings'] , which='weight' , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [''] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_keyword:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , args.quant_disable_keyword , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_enable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.recalibrate_weights:
recalibrate_weights(SCREAMING_SNAKE_CASE )
if args.fuse_qkv:
fuse_qkv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if args.clip_gelu:
clip_gelu(SCREAMING_SNAKE_CASE , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : str ) -> List[Any]:
"""simple docstring"""
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
def fusea(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
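        # share a single activation scale (amax) across Q/K/V so the projections can be fused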
for mod in [qq, qk, qv]:
if not hasattr(SCREAMING_SNAKE_CASE , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
__lowerCAmelCase: Optional[int] = qq._amax.detach().item()
__lowerCAmelCase: Any = qk._amax.detach().item()
__lowerCAmelCase: Tuple = qv._amax.detach().item()
__lowerCAmelCase: Any = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
qq._amax.fill_(SCREAMING_SNAKE_CASE )
qk._amax.fill_(SCREAMING_SNAKE_CASE )
qv._amax.fill_(SCREAMING_SNAKE_CASE )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
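            # clamp the input-quantizer range of the FFN output projection, which effectively
            # clips the GELU activations feeding it to the given maximum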
__lowerCAmelCase: Dict = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
__lowerCAmelCase: Any = mod.weight.shape[0]
__lowerCAmelCase: Tuple = mod._weight_quantizer._amax.detach()
__lowerCAmelCase: Dict = torch.ones(SCREAMING_SNAKE_CASE , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
__lowerCAmelCase: Any = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
__lowerCAmelCase: Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
__lowerCAmelCase: Union[str, Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=SCREAMING_SNAKE_CASE , keepdims=SCREAMING_SNAKE_CASE ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
__lowerCAmelCase: Union[str, Any] = amax
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int=25 , SCREAMING_SNAKE_CASE : List[str]=1_80 , SCREAMING_SNAKE_CASE : List[Any]=None ) -> Dict:
"""simple docstring"""
if ignore is None:
__lowerCAmelCase: Any = []
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Any = [ignore]
__lowerCAmelCase: List[Any] = 0
for name, mod in model.named_modules():
if not hasattr(SCREAMING_SNAKE_CASE , 'weight' ):
continue
__lowerCAmelCase: List[str] = max(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
for name, mod in model.named_modules():
__lowerCAmelCase: List[Any] = getattr(SCREAMING_SNAKE_CASE , '_input_quantizer' , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = getattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' , SCREAMING_SNAKE_CASE )
if not hasattr(SCREAMING_SNAKE_CASE , 'weight' ):
continue
if type(SCREAMING_SNAKE_CASE ) in ignore:
continue
if [True for s in ignore if type(SCREAMING_SNAKE_CASE ) is str and s in name]:
continue
__lowerCAmelCase: Tuple = f'''Act:{input_q.extra_repr()}'''
__lowerCAmelCase: Optional[Any] = f'''Wgt:{weight_q.extra_repr()}'''
__lowerCAmelCase: Any = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(SCREAMING_SNAKE_CASE ) <= line_width:
logger.info(SCREAMING_SNAKE_CASE )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def _a ( SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
__lowerCAmelCase: str = 0
for name, mod in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: int = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if quantizer_mod is not None:
assert hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]="both" , **SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase: List[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_input_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if which in ["weight", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_weight_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_input_quantizer' ) or hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
set_quantizers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
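

# Typical flow for the helpers above (their original identifiers were mangled in this dump):
# register the CLI flags on an argparse parser, set the default QuantDescriptors, configure
# the model's quantizers, run a forward pass with calibration enabled, then load the
# collected amax values and re-enable quantization before fine-tuning or ONNX export.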
| 322 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t: float) -> str:
    """Format `t` (in seconds) to (h):mm:ss."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
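# e.g. format_time(3661) -> '1:01:01', format_time(75) -> '01:15'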
def html_progress_bar(value, total, prefix, label, width=300):
    """HTML code for a progress bar `value`/`total` with `prefix` and `label`."""
    return f'''
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items):
    """Put the texts in `items` in an HTML table."""
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "  <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"    <td>{elt}</td>\n"
        html_code += "  </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class A_ :
_lowercase : str = 5
_lowercase : str = 0.2
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]:
__lowerCAmelCase: List[str] = total
__lowerCAmelCase: Optional[int] = '' if prefix is None else prefix
__lowerCAmelCase: int = leave
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Optional[Any] = width
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: List[str] = None
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]:
__lowerCAmelCase: int = value
if comment is not None:
__lowerCAmelCase: Any = comment
if self.last_value is None:
__lowerCAmelCase: List[Any] = time.time()
__lowerCAmelCase: Any = value
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = self.warmup
__lowerCAmelCase: List[str] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase: Union[str, Any] = time.time()
__lowerCAmelCase: str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase: int = None
if value >= self.total:
__lowerCAmelCase: Any = self.total
__lowerCAmelCase: str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__lowerCAmelCase: Tuple = value
__lowerCAmelCase: int = current_time
if self.average_time_per_item is None:
__lowerCAmelCase: Optional[int] = 1
else:
__lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
__lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase: Any = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any:
super().__init__(UpperCAmelCase )
__lowerCAmelCase: Tuple = None if column_names is None else [column_names]
__lowerCAmelCase: Union[str, Any] = None
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict:
if self.inner_table is None:
__lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase: Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__lowerCAmelCase: List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase: Tuple = None
self.display()
class A_ ( snake_case__ ):
def __init__( self : Any ) -> List[str]:
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = False
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str:
__lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
__lowerCAmelCase: Optional[int] = 0
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
__lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any:
__lowerCAmelCase: Union[str, Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase: Any = False
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]:
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase: Any = None
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase: Union[str, Any] = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
__lowerCAmelCase: Dict = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]:
if self.training_tracker is not None:
__lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase: List[str] = log['loss']
break
if self.first_column == "Epoch":
__lowerCAmelCase: int = int(state.epoch )
else:
__lowerCAmelCase: Tuple = state.global_step
__lowerCAmelCase: Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
__lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , UpperCAmelCase )
__lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase )
__lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase )
__lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase )
__lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase )
__lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__lowerCAmelCase: Tuple = v
else:
__lowerCAmelCase: int = k.split('_' )
__lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase: List[Any] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase: List[str] = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase: str = True
def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = None
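

# Usage note: transformers installs this callback automatically when `Trainer` runs inside a
# Jupyter/Colab notebook, replacing the tqdm console bars with the IPython.display widgets above.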
| 322 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including ``num``.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 322 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main() -> None:
    """Close or flag stale issues on huggingface/accelerate."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest first, so comments[0] is the latest comment on the issue
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 322 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A_ ( snake_case__ ):
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: Any = tempfile.mkdtemp()
__lowerCAmelCase: Tuple = 5
# Realm tok
__lowerCAmelCase: Tuple = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowerCAmelCase: Any = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__lowerCAmelCase: List[Any] = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__lowerCAmelCase: int = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def UpperCAmelCase ( self : Any ) -> Any:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self : str ) -> Any:
__lowerCAmelCase: List[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
__lowerCAmelCase: Optional[Any] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: Union[str, Any] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=UpperCAmelCase , )
return block_records
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: Tuple = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase: Optional[int] = self.get_config()
__lowerCAmelCase: Dict = self.get_dummy_retriever()
__lowerCAmelCase: Optional[Any] = retriever.tokenizer
__lowerCAmelCase: Dict = np.array([0, 3] , dtype='long' )
__lowerCAmelCase: Union[str, Any] = tokenizer(['Test question'] ).input_ids
__lowerCAmelCase: Dict = tokenizer(
['the fourth'] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
__lowerCAmelCase: Optional[Any] = config.reader_seq_len
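        # the retriever returns (has_answers, start_positions, end_positions, concat_inputs)
        # for the retrieved evidence blocks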
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[Any] = retriever(
UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='np' )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Union[str, Any] = self.get_config()
__lowerCAmelCase: Optional[Any] = self.get_dummy_retriever()
__lowerCAmelCase: Optional[int] = retriever.tokenizer
__lowerCAmelCase: Tuple = np.array([0, 3, 5] , dtype='long' )
__lowerCAmelCase: Union[str, Any] = tokenizer(['Test question'] ).input_ids
__lowerCAmelCase: List[Any] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
__lowerCAmelCase: int = config.reader_seq_len
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = retriever(
UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='np' )
self.assertEqual([False, True, True] , UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> int:
__lowerCAmelCase: Optional[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
__lowerCAmelCase: Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
__lowerCAmelCase: int = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowerCAmelCase: List[Any] = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 322 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
__lowerCAmelCase: int = int(number**0.5 )
return number == sq * sq
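# Hedged example: is_sq(36) -> True, is_sq(35) -> False. Note that under this file's
# obfuscation the assignment target above was renamed, so the un-obfuscated body reads
# sq = int(number**0.5); return number == sq * sq.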
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> tuple[int, int]:
"""simple docstring"""
__lowerCAmelCase: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__lowerCAmelCase: int = x_den * y_den * z_den
__lowerCAmelCase: int = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
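# Hedged example: 1/2 + 1/3 + 1/6 = (1*3*6 + 1*2*6 + 1*2*3) / (2*3*6) = 36/36,
# which reduces by gcd 36 to the tuple (1, 1).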
def _a ( SCREAMING_SNAKE_CASE : int = 35 ) -> int:
"""simple docstring"""
__lowerCAmelCase: set = set()
__lowerCAmelCase: int
__lowerCAmelCase: Fraction = Fraction(0 )
__lowerCAmelCase: tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__lowerCAmelCase: Optional[int] = x_num * y_den + x_den * y_num
__lowerCAmelCase: Union[str, Any] = x_den * y_den
__lowerCAmelCase: List[str] = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowerCAmelCase: List[Any] = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
__lowerCAmelCase: Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__lowerCAmelCase: Union[str, Any] = x_den * x_den * y_den * y_den
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[str] = int(sqrt(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: str = int(sqrt(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Any = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowerCAmelCase: Tuple = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=-1
__lowerCAmelCase: List[Any] = x_num * y_num
__lowerCAmelCase: List[str] = x_den * y_num + x_num * y_den
__lowerCAmelCase: Optional[Any] = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowerCAmelCase: Dict = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
__lowerCAmelCase: Dict = x_num * x_num * y_num * y_num
__lowerCAmelCase: int = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: int = int(sqrt(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Optional[Any] = int(sqrt(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Dict = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowerCAmelCase: List[str] = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 322 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[int]:
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCAmelCase: List[str] = tf.cast(UpperCAmelCase , tf.floataa )
__lowerCAmelCase: Tuple = tf.cast(self.warmup_steps , tf.floataa )
__lowerCAmelCase: List[str] = global_step_float / warmup_steps_float
__lowerCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
__lowerCAmelCase: Optional[int] = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
__lowerCAmelCase: List[Any] = AdamWeightDecay(
learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase: Dict = tf.keras.optimizers.Adam(
learning_rate=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , beta_a=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
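# Hedged usage sketch (keyword names are assumptions based on the upstream
# `transformers.create_optimizer`; in this file the factory is bound to `_a`):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
#   )
# During warmup the schedule follows init_lr * (step / num_warmup_steps) ** power,
# then decays polynomially to init_lr * min_lr_ratio over the remaining steps.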
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int:
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[Any] = weight_decay_rate
__lowerCAmelCase: List[str] = include_in_weight_decay
__lowerCAmelCase: Optional[Any] = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
def __init__( self : int ) -> List[Any]:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: int = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
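# Hedged usage sketch for the gradient accumulator above (the class is bound to `A_`
# here and its members are obfuscated; upstream they are step / gradients / reset):
#   accumulator = GradientAccumulator()
#   for micro_batch in micro_batches:
#       accumulator(compute_gradients(micro_batch))   # adds into the running sums
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()                               # zeroes the sums and the step count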
| 322 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_a = logging.get_logger(__name__)
class A_ ( snake_case__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 322 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=[] ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2
__lowerCAmelCase: str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
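# The mask above is used as the alpha channel when pasting an upscaled tile:
# np.pad(..., mode='linear_ramp', end_values=0) fades the opaque (255) interior to 0
# over the overlap width on each side, and the ramp is cropped back off for borders in
# `remove_borders` (tiles at the image edge), leaving those sides fully opaque.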
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] ) -> int:
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : [int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCAmelCase: int = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: List[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCAmelCase: List[Any] = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = n % d
return n - divisor
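# Hedged example of the intended behaviour: (125, 32) -> 96, since 125 % 32 = 29 and
# 125 - 29 = 96, the largest multiple of 32 not exceeding 125.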
class A_ ( snake_case__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 3_5_0 , ) -> Optional[Any]:
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__lowerCAmelCase: Any = image.crop(UpperCAmelCase )
__lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = to_input.size
__lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCAmelCase: int = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
__lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Optional[int] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCAmelCase: int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 7_5 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 5_0 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_2_8 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , ) -> str:
__lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCAmelCase: str = math.ceil(image.size[0] / tile_size )
__lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size )
__lowerCAmelCase: Optional[Any] = tcx * tcy
__lowerCAmelCase: Tuple = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler'
__lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa )
__lowerCAmelCase: Optional[Any] = pipe.to('cuda' )
__lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE : Tuple ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 322 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _a ( SCREAMING_SNAKE_CASE : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
"""simple docstring"""
__lowerCAmelCase: List[str] = []
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for v in tree.values():
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple[int, ...] ) -> Tuple[int, ...]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = []
for d in reversed(SCREAMING_SNAKE_CASE ):
idx.append(flat_idx % d )
__lowerCAmelCase: Any = flat_idx // d
return tuple(reversed(SCREAMING_SNAKE_CASE ) )
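# Hedged example: for dims (2, 3) the flat index 5 maps back to the multi-index (1, 2),
# since row-major flattening gives 1 * 3 + 2 = 5.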
@torch.jit.ignore
def _a ( SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Sequence[int] , SCREAMING_SNAKE_CASE : Optional[Sequence[bool]] = None , SCREAMING_SNAKE_CASE : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
"""simple docstring"""
def reduce_edge_list(SCREAMING_SNAKE_CASE : List[bool] ) -> None:
__lowerCAmelCase: Dict = True
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase: int = -1 * (i + 1)
l[reversed_idx] &= tally
__lowerCAmelCase: List[Any] = l[reversed_idx]
if start_edges is None:
__lowerCAmelCase: Optional[int] = [s == 0 for s in start]
reduce_edge_list(SCREAMING_SNAKE_CASE )
if end_edges is None:
__lowerCAmelCase: Optional[Any] = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
reduce_edge_list(SCREAMING_SNAKE_CASE )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(SCREAMING_SNAKE_CASE ) == 0:
return [()]
elif len(SCREAMING_SNAKE_CASE ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__lowerCAmelCase: List[Tuple[slice, ...]] = []
__lowerCAmelCase: List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if s == e:
path_list.append(slice(SCREAMING_SNAKE_CASE , s + 1 ) )
else:
break
__lowerCAmelCase: Tuple[slice, ...] = tuple(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = len(SCREAMING_SNAKE_CASE )
# start == end, and we're done
if divergence_idx == len(SCREAMING_SNAKE_CASE ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCAmelCase: Union[str, Any] = start[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCAmelCase: int = end[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__lowerCAmelCase: List[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
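# Hedged intuition for the recursion above: dimensions shared by `start` and `end` are
# sliced directly; at the first dimension where they diverge, the ragged leading and
# trailing edges are handled recursively while any contiguous middle block between them
# is emitted as a single slice, keeping the number of slices minimal.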
@torch.jit.ignore
def _a ( SCREAMING_SNAKE_CASE : torch.Tensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> torch.Tensor:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = t.shape[:no_batch_dims]
__lowerCAmelCase: Any = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# _get_minimal_slice_set is inclusive
__lowerCAmelCase: Optional[int] = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE ) )
# Get an ordered list of slices to perform
__lowerCAmelCase: Tuple = _get_minimal_slice_set(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Optional[int] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _a ( SCREAMING_SNAKE_CASE : Callable , SCREAMING_SNAKE_CASE : Dict[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Any = None , SCREAMING_SNAKE_CASE : bool = False , ) -> Any:
"""simple docstring"""
if not (len(SCREAMING_SNAKE_CASE ) > 0):
raise ValueError('Must provide at least one input' )
__lowerCAmelCase: int = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE )]
__lowerCAmelCase: Union[str, Any] = tuple([max(SCREAMING_SNAKE_CASE ) for s in zip(*SCREAMING_SNAKE_CASE )] )
def _prep_inputs(SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__lowerCAmelCase: Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__lowerCAmelCase: Tuple = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__lowerCAmelCase: Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__lowerCAmelCase: Dict[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = None
if _out is not None:
__lowerCAmelCase: List[str] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__lowerCAmelCase: Any = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__lowerCAmelCase: List[str] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(SCREAMING_SNAKE_CASE : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Optional[int] = prepped_outputs
for _ in range(SCREAMING_SNAKE_CASE ):
# Chunk the input
if not low_mem:
__lowerCAmelCase: str = _select_chunk
else:
__lowerCAmelCase: Dict = partial(
_chunk_slice , flat_start=SCREAMING_SNAKE_CASE , flat_end=min(SCREAMING_SNAKE_CASE , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE ) , )
__lowerCAmelCase: Dict[str, Any] = tensor_tree_map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Run the layer on the chunk
__lowerCAmelCase: Optional[int] = layer(**SCREAMING_SNAKE_CASE )
# Allocate space for the output
if out is None:
__lowerCAmelCase: Optional[int] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE )
# Put the chunk in its pre-allocated space
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def assign(SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : dict ) -> None:
for k, v in da.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assign(SCREAMING_SNAKE_CASE , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__lowerCAmelCase: Union[str, Any] = da[k]
assign(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for xa, xa in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__lowerCAmelCase: Dict = xa
elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__lowerCAmelCase: Dict = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
__lowerCAmelCase: Union[str, Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE )
return out
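# Hedged summary: the function above is a memory-bounded map. It flattens the leading
# `no_batch_dims` batch dimensions of every input tensor, runs `layer` on consecutive
# windows of `chunk_size` rows (using _chunk_slice in the low-memory path to avoid
# materialising the expanded inputs), writes each chunk into a pre-allocated output,
# and finally restores the original batch shape.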
class A_ :
def __init__( self : Union[str, Any] , UpperCAmelCase : int = 5_1_2 , ) -> str:
__lowerCAmelCase: str = max_chunk_size
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[tuple] = None
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Callable , UpperCAmelCase : tuple , UpperCAmelCase : int ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__lowerCAmelCase: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__lowerCAmelCase: Tuple = [c for c in candidates if c > min_chunk_size]
__lowerCAmelCase: int = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase , chunk_size=UpperCAmelCase )
return True
except RuntimeError:
return False
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: Any = len(UpperCAmelCase ) - 1
while i > min_viable_chunk_size_index:
__lowerCAmelCase: Tuple = test_chunk_size(candidates[i] )
if not viable:
__lowerCAmelCase: Dict = (min_viable_chunk_size_index + i) // 2
else:
__lowerCAmelCase: Optional[int] = i
__lowerCAmelCase: Union[str, Any] = (i + len(UpperCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Iterable , UpperCAmelCase : Iterable ) -> bool:
__lowerCAmelCase: Tuple = True
for aa, aa in zip(UpperCAmelCase , UpperCAmelCase ):
assert type(UpperCAmelCase ) == type(UpperCAmelCase )
if isinstance(UpperCAmelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase : x[0] )]
__lowerCAmelCase: Tuple = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase : x[0] )]
consistent &= self._compare_arg_caches(UpperCAmelCase , UpperCAmelCase )
else:
consistent &= aa == aa
return consistent
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Callable , UpperCAmelCase : tuple , UpperCAmelCase : int , ) -> int:
__lowerCAmelCase: List[str] = True
__lowerCAmelCase: tuple = tree_map(lambda UpperCAmelCase : a.shape if isinstance(UpperCAmelCase , torch.Tensor ) else a , UpperCAmelCase , UpperCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
__lowerCAmelCase: str = False
if not consistent:
__lowerCAmelCase: Dict = self._determine_favorable_chunk_size(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
__lowerCAmelCase: Tuple = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
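# Hedged usage note: the tuner probes power-of-two chunk sizes up to max_chunk_size,
# calling `fn(*args, chunk_size=c)` under torch.no_grad() and treating a RuntimeError
# (typically CUDA OOM) as "too big", then binary-searches for the largest viable size.
# The result is cached and only re-tuned when the argument shapes/values change.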
| 322 |
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowerCAmelCase: Tuple = True
for i in range(1 , s + 1 ):
__lowerCAmelCase: Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowerCAmelCase: Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
__lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowerCAmelCase: Tuple = s - 2 * j
break
return diff
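# Hedged example: for arr = [1, 6, 11, 5] the total is 23 and the best split is
# {1, 5, 6} vs {11}, so the dp table first admits j = 11 and the function returns
# 23 - 2 * 11 = 1.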
| 322 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__lowerCAmelCase: Tuple = i + 1
else:
__lowerCAmelCase: List[str] = j - 1
return []
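# Hedged trace for nums = [2, 7, 11, 15], target = 9 (the list must already be sorted):
# (i=0, j=3): 2 + 15 = 17 > 9 -> j=2; 2 + 11 = 13 > 9 -> j=1; 2 + 7 = 9 -> return [0, 1].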
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_a = logging.get_logger(__name__)
_a = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_ ( snake_case__ ):
_lowercase : int = 'dpt'
def __init__( self : Optional[int] , UpperCAmelCase : List[Any]=7_6_8 , UpperCAmelCase : Tuple=1_2 , UpperCAmelCase : Optional[int]=1_2 , UpperCAmelCase : Optional[int]=3_0_7_2 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Tuple=1E-12 , UpperCAmelCase : int=3_8_4 , UpperCAmelCase : int=1_6 , UpperCAmelCase : str=3 , UpperCAmelCase : int=False , UpperCAmelCase : Any=True , UpperCAmelCase : int=[2, 5, 8, 1_1] , UpperCAmelCase : Optional[Any]="project" , UpperCAmelCase : Optional[int]=[4, 2, 1, 0.5] , UpperCAmelCase : Optional[int]=[9_6, 1_9_2, 3_8_4, 7_6_8] , UpperCAmelCase : Tuple=2_5_6 , UpperCAmelCase : str=-1 , UpperCAmelCase : int=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=0.4 , UpperCAmelCase : Union[str, Any]=2_5_5 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=[1, 1_0_2_4, 2_4, 2_4] , UpperCAmelCase : str=[0, 1] , UpperCAmelCase : Dict=None , **UpperCAmelCase : Optional[Any] , ) -> Tuple:
super().__init__(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
__lowerCAmelCase: Dict = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
__lowerCAmelCase: Optional[int] = BitConfig(**UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
logger.info('Initializing the config with a `BiT` backbone.' )
__lowerCAmelCase: Tuple = BitConfig(**UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: List[str] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
__lowerCAmelCase: str = backbone_featmap_shape
__lowerCAmelCase: List[str] = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
__lowerCAmelCase: Any = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: Dict = []
__lowerCAmelCase: List[Any] = num_hidden_layers
__lowerCAmelCase: Any = num_attention_heads
__lowerCAmelCase: Union[str, Any] = intermediate_size
__lowerCAmelCase: Optional[int] = hidden_act
__lowerCAmelCase: Optional[Any] = hidden_dropout_prob
__lowerCAmelCase: Tuple = attention_probs_dropout_prob
__lowerCAmelCase: int = initializer_range
__lowerCAmelCase: Optional[int] = layer_norm_eps
__lowerCAmelCase: Any = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[Any] = num_channels
__lowerCAmelCase: Dict = qkv_bias
__lowerCAmelCase: Any = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
__lowerCAmelCase: str = readout_type
__lowerCAmelCase: int = reassemble_factors
__lowerCAmelCase: int = neck_hidden_sizes
__lowerCAmelCase: List[Any] = fusion_hidden_size
__lowerCAmelCase: int = head_in_index
__lowerCAmelCase: Optional[Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase: Optional[int] = use_auxiliary_head
__lowerCAmelCase: Tuple = auxiliary_loss_weight
__lowerCAmelCase: List[Any] = semantic_loss_ignore_index
__lowerCAmelCase: Dict = semantic_classifier_dropout
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase: str = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__lowerCAmelCase: int = self.backbone_config.to_dict()
__lowerCAmelCase: Tuple = self.__class__.model_type
return output
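# Hedged usage sketch (the config class is bound to `A_` here; upstream it is DPTConfig):
#   config = DPTConfig(is_hybrid=True)  # builds a default BiT backbone (depths [3, 4, 9])
#   d = config.to_dict()                # serialises the nested backbone_config as well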
| 322 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a = '''scheduler_config.json'''
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 1
_lowercase : Tuple = 2
_lowercase : Dict = 3
_lowercase : int = 4
_lowercase : Optional[Any] = 5
@dataclass
class A_ ( snake_case__ ):
_lowercase : jnp.ndarray
class A_ :
_lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
_lowercase : Dict = ['dtype']
_lowercase : int = []
_lowercase : Union[str, Any] = True
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
__lowerCAmelCase: Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : str ) -> Dict:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls : Optional[int] ) -> Any:
__lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' )[0] )
__lowerCAmelCase: Dict = [
getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase )
]
return compatible_classes
def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(SCREAMING_SNAKE_CASE : str ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowerCAmelCase: str = []
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class A_ :
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any:
__lowerCAmelCase: str = scheduler.config
if config.trained_betas is not None:
__lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: List[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
__lowerCAmelCase: Optional[Any] = 1.0 - betas
__lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 )
return cls(
alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , )
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = state.alphas_cumprod
__lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5
__lowerCAmelCase: Any = sqrt_alpha_prod.flatten()
__lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
__lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten()
__lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
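# Hedged note: this is the standard DDPM forward process q(x_t | x_0),
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I).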
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
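# Hedged note: this is the v-prediction target of Salimans & Ho (2022),
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.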
| 322 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
_a = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _a ( SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowerCAmelCase: List[str] = Image.open(SCREAMING_SNAKE_CASE )
return im.convert('RGB' )
@dataclass
class A_ :
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_lowercase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the training data.'} )
_lowercase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the validation data.'} )
_lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_lowercase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowercase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCAmelCase ( self : str ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class A_ :
_lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowercase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowercase : str = field(default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
_lowercase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowercase : bool = field(
default=snake_case__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = torch.stack([example['pixel_values'] for example in examples] )
__lowerCAmelCase: Optional[Any] = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase: Optional[int] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase: str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__lowerCAmelCase: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowerCAmelCase: Any = {}
if data_args.train_dir is not None:
__lowerCAmelCase: Optional[int] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
__lowerCAmelCase: Any = os.path.join(data_args.validation_dir , '**' )
__lowerCAmelCase: Any = load_dataset(
'imagefolder' , data_files=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowerCAmelCase: List[Any] = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowerCAmelCase: str = dataset['train'].train_test_split(data_args.train_val_split )
__lowerCAmelCase: List[Any] = split['train']
__lowerCAmelCase: Any = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowerCAmelCase: Optional[int] = dataset['train'].features['labels'].names
__lowerCAmelCase , __lowerCAmelCase: Any = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: int = str(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = label
# Load the accuracy metric from the datasets package
__lowerCAmelCase: Tuple = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
        return metric.compute(predictions=np.argmax(SCREAMING_SNAKE_CASE.predictions , axis=1 ) , references=SCREAMING_SNAKE_CASE.label_ids )
__lowerCAmelCase: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase: List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__lowerCAmelCase: Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowerCAmelCase: int = image_processor.size['shortest_edge']
else:
__lowerCAmelCase: List[Any] = (image_processor.size['height'], image_processor.size['width'])
__lowerCAmelCase: int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowerCAmelCase: Union[str, Any] = Compose(
[
RandomResizedCrop(SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowerCAmelCase: int = Compose(
[
Resize(SCREAMING_SNAKE_CASE ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
    def train_transforms(SCREAMING_SNAKE_CASE : Optional[Any] ):
        __lowerCAmelCase: List[Any] = [
            _train_transforms(pil_img.convert('RGB' ) ) for pil_img in SCREAMING_SNAKE_CASE['image']
        ]
        return SCREAMING_SNAKE_CASE
    def val_transforms(SCREAMING_SNAKE_CASE : Union[str, Any] ):
        __lowerCAmelCase: Tuple = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in SCREAMING_SNAKE_CASE['image']]
        return SCREAMING_SNAKE_CASE
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowerCAmelCase: Optional[int] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowerCAmelCase: Any = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE )
# Initalize our trainer
__lowerCAmelCase: Union[str, Any] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowerCAmelCase: List[Any] = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase: List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase: Optional[int] = last_checkpoint
__lowerCAmelCase: Any = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowerCAmelCase: Tuple = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowerCAmelCase: Optional[int] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
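# Example invocation (illustrative; dataset name and paths are placeholders):
#     python run_image_classification.py \
#         --dataset_name beans \
#         --output_dir ./beans_outputs \
#         --remove_unused_columns False \
#         --do_train --do_eval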
| 322 |
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 322 | 1 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_a = logging.get_logger('''transformers.models.speecht5''')
_a = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
_a = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
_a = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
_a = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
_a = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
_a = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
_a = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
_a = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
_a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_a = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_a = []
_a = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
_a = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
_a = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
_a = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCAmelCase: Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__lowerCAmelCase: List[str] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__lowerCAmelCase: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__lowerCAmelCase: Any = value
elif weight_type == "weight_g":
__lowerCAmelCase: Dict = value
elif weight_type == "weight_v":
__lowerCAmelCase: str = value
elif weight_type == "bias":
__lowerCAmelCase: List[str] = value
elif weight_type == "running_mean":
__lowerCAmelCase: Dict = value
elif weight_type == "running_var":
__lowerCAmelCase: Dict = value
elif weight_type == "num_batches_tracked":
__lowerCAmelCase: Union[str, Any] = value
else:
__lowerCAmelCase: str = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ) -> Dict:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCAmelCase , __lowerCAmelCase: Dict = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
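# Worked examples for the matcher above (referenced later as `should_ignore`),
# derived from its three rules: trailing `.*` = prefix match, infix `.*.` =
# prefix-and-suffix match, otherwise plain substring match.
#     should_ignore('encoder.proj.weight', ['encoder.proj'])                          -> True
#     should_ignore('text_decoder_prenet.embed_tokens', ['text_decoder_prenet.*'])    -> True
#     should_ignore('decoder.layers.3.norm_k.bias', ['decoder.layers.*.norm_k.bias']) -> True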
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: str = []
if task == "s2t":
__lowerCAmelCase: List[str] = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCAmelCase: int = MAPPING_S2T
__lowerCAmelCase: str = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCAmelCase: str = None
__lowerCAmelCase: str = MAPPING_T2S
__lowerCAmelCase: Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCAmelCase: Any = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCAmelCase: Dict = MAPPING_S2S
__lowerCAmelCase: Optional[int] = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
logger.info(f'''{name} was ignored''' )
continue
__lowerCAmelCase: str = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase: int = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCAmelCase , __lowerCAmelCase: Any = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCAmelCase: Tuple = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCAmelCase: Union[str, Any] = True
if "*" in mapped_key:
__lowerCAmelCase: str = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__lowerCAmelCase: Optional[Any] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__lowerCAmelCase: Dict = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase: int = 'weight_v'
elif "bias" in name:
__lowerCAmelCase: Optional[Any] = 'bias'
elif "weight" in name:
__lowerCAmelCase: Tuple = 'weight'
elif "running_mean" in name:
__lowerCAmelCase: Any = 'running_mean'
elif "running_var" in name:
__lowerCAmelCase: Dict = 'running_var'
elif "num_batches_tracked" in name:
__lowerCAmelCase: Any = 'num_batches_tracked'
else:
__lowerCAmelCase: str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Tuple = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase: str = name.split('.' )
__lowerCAmelCase: Tuple = int(items[0] )
__lowerCAmelCase: str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__lowerCAmelCase: str = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__lowerCAmelCase: List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__lowerCAmelCase: List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__lowerCAmelCase: Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : List[Any]=None , ) -> Optional[int]:
"""simple docstring"""
if config_path is not None:
__lowerCAmelCase: str = SpeechTaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: List[str] = SpeechTaConfig()
if task == "s2t":
__lowerCAmelCase: Any = config.max_text_positions
__lowerCAmelCase: Tuple = SpeechTaForSpeechToText(SCREAMING_SNAKE_CASE )
elif task == "t2s":
__lowerCAmelCase: Any = 18_76
__lowerCAmelCase: Dict = 6_00
__lowerCAmelCase: List[Any] = config.max_speech_positions
__lowerCAmelCase: int = SpeechTaForTextToSpeech(SCREAMING_SNAKE_CASE )
elif task == "s2s":
__lowerCAmelCase: Dict = 18_76
__lowerCAmelCase: Tuple = config.max_speech_positions
__lowerCAmelCase: Optional[int] = SpeechTaForSpeechToSpeech(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
__lowerCAmelCase: List[Any] = SpeechTaTokenizer(SCREAMING_SNAKE_CASE , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCAmelCase: List[str] = AddedToken('<mask>' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCAmelCase: Optional[Any] = SpeechTaFeatureExtractor()
__lowerCAmelCase: Tuple = SpeechTaProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE )
recursively_load_weights(fairseq_checkpoint['model'] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(SCREAMING_SNAKE_CASE )
model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
_a = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
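# Example invocation (illustrative; the script and file names are placeholders):
#     python convert_speecht5_checkpoint.py \
#         --task t2s \
#         --checkpoint_path speecht5_tts.pt \
#         --vocab_path spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts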
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( snake_case__ ):
_lowercase : int = ['image_processor', 'tokenizer']
_lowercase : Union[str, Any] = 'LayoutLMv3ImageProcessor'
_lowercase : List[str] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[Any] ) -> str:
__lowerCAmelCase: str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase , )
__lowerCAmelCase: List[Any] = kwargs.pop('feature_extractor' )
__lowerCAmelCase: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
__lowerCAmelCase: str = self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCAmelCase: List[str] = features['words']
__lowerCAmelCase: List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
__lowerCAmelCase: Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowerCAmelCase: int = self.get_overflowing_images(UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowerCAmelCase: str = images
return encoded_inputs
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowerCAmelCase: str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' )
return images_with_overflow
def UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> List[str]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , )
return self.image_processor
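# Illustrative usage sketch (assumes this class is exported as LayoutLMv3Processor
# and that the referenced checkpoint is available):
#     processor = LayoutLMv3Processor.from_pretrained('microsoft/layoutlmv3-base')
#     encoding = processor(images=image, return_tensors='pt')               # apply_ocr=True path
#     # encoding = processor(images=image, text=words, boxes=boxes, ...)    # apply_ocr=False path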
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''ConditionalDetrFeatureExtractor''']
_a = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
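# Design note: with `_LazyModule`, `import transformers` stays cheap -- the
# torch/vision-backed submodules registered above are only imported the first
# time a name such as `ConditionalDetrForObjectDetection` is actually accessed.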
| 322 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_a = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : tuple , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=False , ) -> str:
"""simple docstring"""
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
else:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowerCAmelCase: str = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__lowerCAmelCase: Dict = 'cpu'
__lowerCAmelCase: Optional[int] = Path(SCREAMING_SNAKE_CASE )
# VAE DECODER
__lowerCAmelCase: Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' )
__lowerCAmelCase: Union[str, Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowerCAmelCase: Any = vae_decoder.decode
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
_a = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
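# Example invocation (illustrative; the script name and model path are placeholders):
#     python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14 --fp16
# Note that --fp16 requires a CUDA device, as enforced in convert_models above.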
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _a ( ) -> str:
"""simple docstring"""
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 )
__lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] )
__lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
return sub_problem_sol
else:
return 0
__lowerCAmelCase: List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: int = 1 + min([right, diagonal, down] )
__lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase: int = [0]
__lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase: Optional[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1]
__lowerCAmelCase: str = dp_array[row + 1][col + 1]
__lowerCAmelCase: Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = 0
return largest_square_area
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: Tuple = [0] * (cols + 1)
__lowerCAmelCase: Optional[int] = [0] * (cols + 1)
__lowerCAmelCase: str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: int = current_row[col + 1]
__lowerCAmelCase: Union[str, Any] = next_row[col + 1]
__lowerCAmelCase: Any = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))  # prints 2: the side length of the largest all-ones square
| 322 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_a = datasets.utils.logging.get_logger(__name__)
@dataclass
class A_ ( datasets.BuilderConfig ):
_lowercase : Optional[datasets.Features] = None
_lowercase : str = "utf-8"
_lowercase : Optional[str] = None
_lowercase : Optional[str] = None
_lowercase : bool = True # deprecated
_lowercase : Optional[int] = None # deprecated
_lowercase : int = 1_0 << 2_0 # 10MB
_lowercase : Optional[bool] = None
class A_ ( datasets.ArrowBasedBuilder ):
_lowercase : Union[str, Any] = JsonConfig
def UpperCAmelCase ( self : str ) -> str:
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
__lowerCAmelCase: Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> str:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__lowerCAmelCase: str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase , (str, list, tuple) ):
__lowerCAmelCase: List[Any] = data_files
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Dict = [files]
__lowerCAmelCase: str = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__lowerCAmelCase: int = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Dict = [files]
__lowerCAmelCase: Optional[int] = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__lowerCAmelCase: Optional[Any] = self.config.features.arrow_schema.field(UpperCAmelCase ).type
__lowerCAmelCase: int = pa_table.append_column(UpperCAmelCase , pa.array([None] * len(UpperCAmelCase ) , type=UpperCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowerCAmelCase: Union[str, Any] = table_cast(UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowerCAmelCase: Any = json.load(UpperCAmelCase )
# We keep only the field we are interested in
__lowerCAmelCase: List[Any] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase , (list, tuple) ):
__lowerCAmelCase: Any = set().union(*[row.keys() for row in dataset] )
__lowerCAmelCase: int = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
else:
__lowerCAmelCase: List[Any] = dataset
__lowerCAmelCase: Optional[Any] = pa.Table.from_pydict(UpperCAmelCase )
yield file_idx, self._cast_table(UpperCAmelCase )
# If the file has one json object per line
else:
with open(UpperCAmelCase , 'rb' ) as f:
__lowerCAmelCase: Any = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__lowerCAmelCase: Any = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
__lowerCAmelCase: str = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
__lowerCAmelCase: Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__lowerCAmelCase: Tuple = batch.decode(self.config.encoding , errors=UpperCAmelCase ).encode('utf-8' )
try:
while True:
try:
__lowerCAmelCase: Any = paj.read_json(
io.BytesIO(UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase )
or block_size > len(UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(UpperCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowerCAmelCase: Optional[int] = json.load(UpperCAmelCase )
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase , UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
__lowerCAmelCase: str = set().union(*[row.keys() for row in dataset] )
__lowerCAmelCase: Optional[int] = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
__lowerCAmelCase: Dict = pa.Table.from_pydict(UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(UpperCAmelCase )
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase )
batch_idx += 1
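# --- Illustrative usage (this builder backs the public 'json' loader) ---
#     import datasets
#     ds = datasets.load_dataset('json', data_files='data.jsonl', split='train')
#     # a single JSON document whose records live under a key:
#     ds = datasets.load_dataset('json', data_files='data.json', field='data', split='train')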
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = SavedModel()
__lowerCAmelCase: str = []
with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase: Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE )
if strict and len(SCREAMING_SNAKE_CASE ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops ) )
elif len(SCREAMING_SNAKE_CASE ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
_a = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
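# Example invocation (illustrative; the saved-model path is a placeholder):
#     python check_tf_ops.py --saved_model_path ./saved_model/saved_model.pb --opset 12 --strict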
| 322 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_a = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_a = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels):
    """Accuracy together with the (binary) F1 score."""
    acc = simple_accuracy(preds, labels )
    f1 = float(f1_score(y_true=labels, y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations between predictions and labels."""
    pearson_corr = float(pearsonr(preds, labels )[0] )
    spearman_corr = float(spearmanr(preds, labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
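# Worked micro-example (illustrative): with preds = np.array([1, 0, 1]) and
# labels = np.array([1, 1, 1]), simple_accuracy gives 2/3, precision is 1.0 and
# recall is 2/3, so f1_score is 0.8 and acc_and_f1 returns
# {'accuracy': 0.666..., 'f1': 0.8}.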
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 322 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for two input bits plus a carry-in."""
    # Reject strings first; numeric validity is checked below. (The string check
    # is the best reconstruction consistent with the error messages.)
    if (
        isinstance(input_1, str )
        or isinstance(input_2, str )
        or isinstance(carry_in, str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
# build registers
    qr = qiskit.QuantumRegister(4, 'qr' )
    cr = qiskit.ClassicalRegister(2, 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr )
    for i in range(0, 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3], cr ) # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit, backend, shots=10_00 )
    return job.result().get_counts(quantum_circuit )
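# Sanity check (illustrative): a full adder computes input_1 + input_2 + carry_in,
# so the deterministic input (1, 1, 1) gives 1 + 1 + 1 = 0b11, i.e. sum = 1 and
# carry_out = 1. Every shot should then land on the state '11', so the counts
# below should be roughly {'11': 1000} for the 1000 shots used above.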
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 322 | 1 |
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number(number: int) -> int:
    """Sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
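# Worked example (from the problem statement): 44 -> 32 -> 13 -> 10 -> 1 stops
# at 1, while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops
# back to 89, so every starting number eventually reaches either 1 or 89.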
CHAINS = [None] * 1_0_0_0_0_0_0_0
CHAINS[57] = True
CHAINS[0] = False
def chain(number: int) -> bool:
    """Memoised check of whether ``number``'s chain ends at 89 (True) or at 1 (False)."""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
number *= 10
return number_chain
def solution(number: int = 10_00_00_00) -> int:
    """Count how many starting numbers below ``number`` have a chain arriving at 89."""
for i in range(1 , SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
    return CHAINS[:number].count(True )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=3_6, num_hidden_layers=2, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1_0_0_0, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
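    # Example (illustrative): with the defaults above, image_seq_length is
    # (4 // 2) ** 2 + 1 = 5 and seq_length is 7 + 5 = 12 tokens.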
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask ) -> None:
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids, training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'pixel_values': pixel_values}, training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ) -> None:
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels ) -> None:
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels ) -> None:
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ) -> bool:
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False ) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1 ), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v, tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['labels'] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32 )
        return inputs_dict
    def setUp(self) -> None:
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=3_7 )
    def test_config(self) -> None:
self.config_tester.run_common_tests()
    def test_loss_computation(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model, 'hf_compute_loss', None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                loss = model(input_ids, **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_0_0
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels )
                        loss = model(input_ids, **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask )
    def test_model_various_embeddings(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask )
    def test_for_sequence_classification(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
    def test_for_token_classification(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
    def test_for_question_answering(self) -> None:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
@slow
    def test_model_from_pretrained(self) -> None:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head(self) -> None:
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ), axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False )
# verify the logits
        expected_shape = (1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4 ) )
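        # Shape rationale (illustrative): 2 text tokens + (224 // 16) ** 2 = 196
        # image patches + 1 CLS token give a sequence length of 199, hence the
        # expected hidden-state shape (1, 199, 768).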
| 322 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend(line: str):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """Read the init and extract the backend-specific objects it declares."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py' ), 'r', encoding='utf-8', newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name: str, backend_name: str):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name )
    else:
        return DUMMY_CLASS.format(name, backend_name )
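# Example (illustrative): create_dummy_object("UNet2DModel", '["torch"]') is
# neither all-upper nor all-lower case, so it renders DUMMY_CLASS, while
# create_dummy_object("SOME_CONSTANT", '["torch"]') renders DUMMY_CONSTANT.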
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files, one per backend combination."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check whether the dummy files are up to date and optionally overwrite them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path, f'''dummy_{short_names.get(backend, backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path, 'r', encoding='utf-8', newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend, backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend, backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase ):
    def __init__( self, parent, batch_size=1_3, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        config = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxAlbertModelTester(self )
@slow
    def test_model_from_pretrained(self) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class A_ ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self) -> None:
        model = FlaxAlbertModel.from_pretrained('albert-base-v2' )
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape, expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
| 322 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    """Compile and load the custom multi-scale deformable attention kernels."""
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root )], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
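# Usage sketch (illustrative, not part of the original module): the returned
# extension exposes the fused CPU/CUDA deformable attention ops, e.g.
#
#   MSDA = load_cuda_kernels()
#   # out = MSDA.ms_deform_attn_forward(...)  # hypothetical call; check the
#   # compiled extension for the exact signature before relying on it.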
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls) -> None:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub(self) -> None:
        config = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
        config.push_to_hub('test-config', use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k ) )
    def test_push_to_hub_in_organization(self) -> None:
        config = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k ) )
    def test_push_to_hub_dynamic_config(self) -> None:
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=4_2 )
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
        self.assertEqual(new_config.attribute, 4_2 )
class A_ ( unittest.TestCase ):
    def test_config_update_from_string(self) -> None:
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1 # int
        resid_pdrop = c.resid_pdrop + 1.0 # float
        scale_attn_weights = not c.scale_attn_weights # bool
        summary_type = c.summary_type + 'foo' # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type' )
    def test_config_common_kwargs_is_complete(self) -> None:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults )}.''' )
    def test_from_pretrained_subfolder(self) -> None:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down(self) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_url(self) -> None:
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def test_local_versioning(self) -> None:
        configuration = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json' ), 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size, 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 7_6_8
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json' ), os.path.join(tmp_dir, 'config.42.0.0.json' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size, 7_6_8 )
    def test_repo_versioning_before(self) -> None:
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 322 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig(PretrainedConfig ):
    model_type = 'timesformer'
    def __init__( self, image_size=2_2_4, patch_size=1_6, num_channels=3, num_frames=8, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
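# Note (illustrative): with the defaults above, each 224x224 frame is split
# into (224 // 16) ** 2 = 196 patch tokens, and a clip contributes
# num_frames=8 such frames to the divided space-time attention.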
| 322 |
| 322 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    def __init__( self, p_stop=0.01, max_length=1_0_0_0 ) -> None:
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class A_ ( unittest.TestCase ):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True ) -> None:
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards], [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists, expected )
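    # Example (illustrative): BatchSampler(range(8), batch_size=2) yields the
    # batches [0, 1], [2, 3], [4, 5], [6, 7]; sharded across 2 processes they
    # alternate, so process 0 gets [[0, 1], [4, 5]] and process 1 gets
    # [[2, 3], [6, 7]], mirroring the expected values in the tests below.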
    def test_batch_sampler_shards_with_no_splits(self) -> None:
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(2_4 ), batch_size=3, drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        batch_sampler = BatchSampler(range(2_4 ), batch_size=3, drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(2_1 ), batch_size=3, drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        batch_sampler = BatchSampler(range(2_1 ), batch_size=3, drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(2_2 ), batch_size=3, drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        batch_sampler = BatchSampler(range(2_2 ), batch_size=3, drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(2_0 ), batch_size=3, drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        batch_sampler = BatchSampler(range(2_0 ), batch_size=3, drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ), batch_size=3, drop_last=False )
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected )
        batch_sampler = BatchSampler(range(2 ), batch_size=3, drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected )
    def test_batch_sampler_shards_with_splits(self) -> None:
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(2_4 ), batch_size=4, drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        batch_sampler = BatchSampler(range(2_4 ), batch_size=4, drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(2_2 ), batch_size=4, drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        batch_sampler = BatchSampler(range(2_2 ), batch_size=4, drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(2_1 ), batch_size=4, drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        batch_sampler = BatchSampler(range(2_1 ), batch_size=4, drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ), batch_size=4, drop_last=False )
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
        batch_sampler = BatchSampler(range(2 ), batch_size=4, drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCAmelCase: int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCAmelCase: int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
__lowerCAmelCase: str = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: List[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size and does not have a
# multiple of num_processes batches.
__lowerCAmelCase: List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: List[str] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCAmelCase: int = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Tuple = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
# Check the shards when the dataset is a round multiple of batch size.
__lowerCAmelCase: int = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCAmelCase: str = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: List[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: str = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCAmelCase: Tuple = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCAmelCase: Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
__lowerCAmelCase: List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: int = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
__lowerCAmelCase: Dict = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
__lowerCAmelCase: Optional[int] = [BatchSamplerShard(UpperCAmelCase , 2 , UpperCAmelCase , even_batches=UpperCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
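    # With `even_batches=False`, sharding a batch sampler with varying batch sizes
    # is a plain round-robin split: shard 0 receives batches 0, 2, 4 and shard 1
    # receives batches 1, 3, regardless of the individual batch lengths.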
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Dict=False , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Union[str, Any]=False ) -> Union[str, Any]:
random.seed(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = list(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = [
IterableDatasetShard(
UpperCAmelCase , batch_size=UpperCAmelCase , drop_last=UpperCAmelCase , num_processes=UpperCAmelCase , process_index=UpperCAmelCase , split_batches=UpperCAmelCase , )
for i in range(UpperCAmelCase )
]
__lowerCAmelCase: List[str] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(UpperCAmelCase )
iterable_dataset_lists.append(list(UpperCAmelCase ) )
__lowerCAmelCase: List[str] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__lowerCAmelCase: List[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
self.assertTrue(len(UpperCAmelCase ) % shard_batch_size == 0 )
__lowerCAmelCase: List[str] = []
for idx in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(UpperCAmelCase ) < len(UpperCAmelCase ):
reference += reference
self.assertListEqual(UpperCAmelCase , reference[: len(UpperCAmelCase )] )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase: List[str] = 4_2
__lowerCAmelCase: Optional[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
# Edge case with a very small dataset
__lowerCAmelCase: int = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> int:
__lowerCAmelCase: Dict = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = SkipBatchSampler(UpperCAmelCase , 2 )
self.assertListEqual(list(UpperCAmelCase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase ( self : Any ) -> str:
__lowerCAmelCase: str = DataLoader(list(range(1_6 ) ) , batch_size=4 )
__lowerCAmelCase: int = skip_first_batches(UpperCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
__lowerCAmelCase: Any = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
Accelerator()
__lowerCAmelCase: List[Any] = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
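# Sketch (hypothetical helper, not the accelerate implementation): every shard
# expectation in the tests above reduces to this round-robin rule, where
# process `p` of `n` processes keeps every n-th batch starting at offset `p`.
def _round_robin_shard(batches , num_processes , process_index ):
    return [batch for i, batch in enumerate(batches ) if i % num_processes == process_index]
# e.g. _round_robin_shard([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] , 2 , 0 )
# returns [[0, 1, 2], [6, 7, 8]], the same pattern as the expected shards above.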
| 322 |
def _a ( number : int ) -> bool:
    """
    Check whether `number` is an automorphic number, i.e. whether its square
    ends in the same digits as the number itself (e.g. 76 * 76 = 5776).
    """
    if not isinstance(number , int ):
        __lowerCAmelCase: str = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(__lowerCAmelCase )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
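# Quick sanity checks of the automorphic-number test above (illustrative
# values; 5 * 5 = 25, 76 * 76 = 5776, 7 * 7 = 49):
assert _a(5 ) is True
assert _a(76 ) is True
assert _a(7 ) is False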
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 1 |
from manim import *
class A_ ( snake_case__ ):
def UpperCAmelCase ( self : Dict ) -> Tuple:
__lowerCAmelCase: Dict = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase: Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCAmelCase: str = [mem.copy() for i in range(6 )]
__lowerCAmelCase: Dict = [mem.copy() for i in range(6 )]
__lowerCAmelCase: Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__lowerCAmelCase: Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__lowerCAmelCase: Tuple = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__lowerCAmelCase: Any = Text('CPU' , font_size=2_4 )
__lowerCAmelCase: Tuple = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = [mem.copy() for i in range(1 )]
__lowerCAmelCase: List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__lowerCAmelCase: Optional[Any] = Text('GPU' , font_size=2_4 )
__lowerCAmelCase: Union[str, Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.align_to(UpperCAmelCase , UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = [mem.copy() for i in range(6 )]
__lowerCAmelCase: Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__lowerCAmelCase: Optional[int] = Text('Model' , font_size=2_4 )
__lowerCAmelCase: Optional[Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , )
__lowerCAmelCase: Any = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=2_4 , )
__lowerCAmelCase: List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase: Tuple = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=2.5 ) , Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
self.add(UpperCAmelCase )
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: Any = []
__lowerCAmelCase: Union[str, Any] = []
for i, rect in enumerate(UpperCAmelCase ):
__lowerCAmelCase: List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase )
cpu_target.generate_target()
__lowerCAmelCase: List[Any] = 0.46 / 4
__lowerCAmelCase: Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase , buff=0.0 )
cpu_targs.append(UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase ) )
second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
self.play(*UpperCAmelCase )
self.play(*UpperCAmelCase )
self.wait()
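# Usage note (assumed module file name): a scene like the one above is rendered
# from the command line with `manim -pql <this_file>.py A_`, where `-pql`
# previews at low quality and `A_` is the scene class defined in this dump.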
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
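# Sketch (hypothetical helper, not part of the test suite): the checkpoint
# exercised above, used end to end with its tokenizer via the standard
# transformers API.
def _xlm_generation_sketch() -> None:
    from transformers import XLMTokenizer

    tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
    model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
    input_ids = tokenizer.encode('the president' , return_tensors='pt' )
    # greedy decoding, mirroring `do_sample=False` in the slow test above
    output_ids = model.generate(input_ids , do_sample=False )
    print(tokenizer.decode(output_ids[0] ) )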
| 322 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class A_ ( snake_case__ ):
# to overwrite in feature-extractor-specific tests
_lowercase : Any = None
_lowercase : Optional[int] = None
@property
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'sampling_rate' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'padding_value' ) )
def UpperCAmelCase ( self : int ) -> str:
__lowerCAmelCase: Dict = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Union[str, Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCAmelCase ) == len(UpperCAmelCase ) for x, y in zip(UpperCAmelCase , processed_features[input_name] ) ) )
__lowerCAmelCase: int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
__lowerCAmelCase: Tuple = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__lowerCAmelCase: Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase: int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase ( self : Dict ) -> Dict:
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__lowerCAmelCase: Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase: List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: List[str] = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
__lowerCAmelCase: Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase: List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : str=False ) -> List[Any]:
def _inputs_have_equal_length(UpperCAmelCase : Union[str, Any] ):
__lowerCAmelCase: List[str] = len(input[0] )
for input_slice in input[1:]:
if len(UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ):
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1E-3 ):
return False
return True
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase: Tuple = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase: Tuple = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase: int = self.feat_extract_tester.min_seq_length
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.batch_size
__lowerCAmelCase: Dict = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase: List[Any] = feat_extract.pad(UpperCAmelCase , padding=UpperCAmelCase )
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: int = feat_extract.pad(UpperCAmelCase , padding='longest' )
__lowerCAmelCase: Union[str, Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
__lowerCAmelCase: List[Any] = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )
__lowerCAmelCase: Tuple = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='max_length' )[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_tensors='np' )
__lowerCAmelCase: Any = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Any = feat_extract.pad(UpperCAmelCase , pad_to_multiple_of=1_0 )
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCAmelCase , padding='longest' , pad_to_multiple_of=1_0 )
__lowerCAmelCase: Optional[int] = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=UpperCAmelCase , return_tensors='np' , )
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(all(len(UpperCAmelCase ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
__lowerCAmelCase: Optional[int] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowerCAmelCase: List[str] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict=False ) -> Optional[Any]:
def _inputs_have_equal_length(UpperCAmelCase : Optional[int] ):
__lowerCAmelCase: Tuple = len(input[0] )
for input_slice in input[1:]:
if len(UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : str , UpperCAmelCase : str ):
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1E-3 ):
return False
return True
__lowerCAmelCase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
__lowerCAmelCase: Dict = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Any = feat_extract.pad(UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
__lowerCAmelCase: Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
# truncate to smallest with np
__lowerCAmelCase: int = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=UpperCAmelCase , )
__lowerCAmelCase: Any = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
__lowerCAmelCase: Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
# truncate to middle
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase , return_tensors='np' , )
__lowerCAmelCase: List[Any] = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase )
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , truncation=UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='longest' , truncation=UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='longest' , truncation=UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='max_length' , truncation=UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Optional[int] = 1_2
__lowerCAmelCase: Any = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
__lowerCAmelCase: List[Any] = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , )
__lowerCAmelCase: List[str] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase: List[str] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase: Dict = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
self._check_padding(numpify=UpperCAmelCase )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self._check_padding(numpify=UpperCAmelCase )
def UpperCAmelCase ( self : List[str] ) -> str:
self._check_truncation(numpify=UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> str:
self._check_truncation(numpify=UpperCAmelCase )
@require_torch
def UpperCAmelCase ( self : Tuple ) -> Dict:
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Union[str, Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: List[str] = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase: Any = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase: Dict = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase: Any = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
__lowerCAmelCase: int = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: Tuple = True
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**UpperCAmelCase )
__lowerCAmelCase: Dict = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: int = [len(UpperCAmelCase ) for x in speech_inputs]
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase: Any = feat_extract.pad(UpperCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase: int = self.feat_extract_dict
__lowerCAmelCase: Union[str, Any] = True
__lowerCAmelCase: Any = self.feature_extraction_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Union[str, Any] = [len(UpperCAmelCase ) for x in speech_inputs]
__lowerCAmelCase: str = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[Any] = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase: Tuple = min(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
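# Sketch (illustrative names, not from the test suite) of the invariant the
# attention-mask assertions above check, in plain numpy:
def _pad_with_mask(seqs , padding_value=0.0 ):
    max_len = max(len(s ) for s in seqs )
    padded = np.array([list(s ) + [padding_value] * (max_len - len(s )) for s in seqs] )
    mask = np.array([[1] * len(s ) + [0] * (max_len - len(s )) for s in seqs] )
    # mask.sum(-1) recovers the original lengths, which is exactly what
    # `processed.attention_mask.sum(-1).tolist() == lengths` verifies above
    return padded, mask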
| 322 |
def count_inversions_bf ( arr : list ) -> int:
    """
    Count the inversions in `arr` by brute force, in O(n^2) time.
    """
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive ( arr : list ) -> tuple:
    """
    Count inversions with a merge-sort style divide and conquer, in
    O(n log n) time. Returns the sorted array and the inversion count.
    """
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p , inversion_p = count_inversions_recursive(p )
    sorted_q , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(sorted_p , sorted_q )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions ( p : list , q : list ) -> tuple:
    """
    Merge the two sorted arrays `p` and `q`, counting the inversions that
    cross the split point.
    """
    r = []
    i = 0
    j = 0
    num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main ( ) -> None:
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
if __name__ == "__main__":
main()
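# Cross-check sketch (the local `random` import is an assumption, not part of
# the original file): the O(n^2) brute force and the O(n log n) recursive
# version must always agree on random inputs.
def _cross_check(num_trials=100 , max_len=50 ):
    import random

    for _ in range(num_trials ):
        arr = [random.randint(0 , 100 ) for _ in range(random.randint(0 , max_len ) )]
        _ , fast = count_inversions_recursive(arr )
        assert fast == count_inversions_bf(arr )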
| 322 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : Tuple = (KDPMaDiscreteScheduler,)
_lowercase : Union[str, Any] = 1_0
def UpperCAmelCase ( self : List[Any] , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
__lowerCAmelCase: str = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : List[Any] ) -> str:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Dict:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = self.scheduler_classes[0]
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(prediction_type='v_prediction' )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase: Optional[Any] = self.dummy_model()
__lowerCAmelCase: Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase: Optional[Any] = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: List[Any] = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[str] = output.prev_sample
__lowerCAmelCase: Optional[Any] = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCAmelCase: Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
if torch_device == "mps":
return
__lowerCAmelCase: Union[str, Any] = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config()
__lowerCAmelCase: Union[str, Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase: List[Any] = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: str = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: str = output.prev_sample
__lowerCAmelCase: Tuple = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
if torch_device == "mps":
return
__lowerCAmelCase: Optional[Any] = self.scheduler_classes[0]
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config()
__lowerCAmelCase: List[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
__lowerCAmelCase: str = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter.to(UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCAmelCase: List[Any] = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = output.prev_sample
__lowerCAmelCase: str = torch.sum(torch.abs(UpperCAmelCase ) )
__lowerCAmelCase: Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase ) )
if str(UpperCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
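# Hedged sketch of the sampling loop the test above exercises. The mangled class
# name KDPMaDiscreteScheduler appears to correspond to diffusers'
# KDPM2DiscreteScheduler (an assumption); the toy model below is a placeholder
# for a UNet, not part of the original test.
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

def toy_model(x, t):
    # crude stand-in for a noise-prediction network
    return 0.1 * x

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = toy_model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample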
| 322 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven (skip a few timesteps so the solver starts mid-schedule)
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
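# Hedged sketch: the DPM-Solver single/multi-step schedulers share a config schema,
# so they can be swapped via from_config exactly as the test above does. The
# hyperparameters below are illustrative.
from diffusers import DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler

single = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
multi = DPMSolverMultistepScheduler.from_config(single.config)  # same hyperparameters
roundtrip = DPMSolverSinglestepScheduler.from_config(multi.config)
assert roundtrip.config.solver_order == 2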
| 322 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A_ ( snake_case__ ):
def UpperCAmelCase ( self : Dict , UpperCAmelCase : float ) -> float:
return 0.0
def _a ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int ) -> tuple[int | float, int | float]:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
__lowerCAmelCase: Any = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _a ( SCREAMING_SNAKE_CASE : FilterType , SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__lowerCAmelCase: Any = 5_12
__lowerCAmelCase: List[Any] = [1] + [0] * (size - 1)
__lowerCAmelCase: Union[str, Any] = [filter_type.process(SCREAMING_SNAKE_CASE ) for item in inputs]
__lowerCAmelCase: Any = [0] * (samplerate - size) # zero-padding
outputs += filler
__lowerCAmelCase: Optional[Any] = np.abs(np.fft.fft(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: str = 20 * np.logaa(SCREAMING_SNAKE_CASE )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
__lowerCAmelCase: int = get_bounds(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( SCREAMING_SNAKE_CASE : FilterType , SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = 5_12
__lowerCAmelCase: Dict = [1] + [0] * (size - 1)
__lowerCAmelCase: Any = [filter_type.process(SCREAMING_SNAKE_CASE ) for item in inputs]
__lowerCAmelCase: Union[str, Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
__lowerCAmelCase: int = np.angle(np.fft.fft(SCREAMING_SNAKE_CASE ) )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(SCREAMING_SNAKE_CASE , -2 * pi ) )
plt.show()
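# Hedged example of a concrete filter for the plots above: the protocol only
# requires a process(sample) -> float method. This one-pole low-pass is an
# illustrative stand-in, not a filter from the original module.
class OnePoleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self.prev += self.alpha * (sample - self.prev)
        return self.prev

# In the unmangled source the two helpers above are presumably
# show_frequency_response and show_phase_response; with real names one would call:
# show_frequency_response(OnePoleLowPass(), 48000)  # samplerate of 48 kHz assumed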
| 322 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = int(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=3_00 ) -> int:
"""simple docstring"""
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[str] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__lowerCAmelCase: List[Any] = f'''{elt:.6f}''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(SCREAMING_SNAKE_CASE )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class A_ :
_lowercase : str = 5
_lowercase : str = 0.2
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]:
__lowerCAmelCase: List[str] = total
__lowerCAmelCase: Optional[int] = '' if prefix is None else prefix
__lowerCAmelCase: int = leave
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Optional[Any] = width
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: List[str] = None
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]:
__lowerCAmelCase: int = value
if comment is not None:
__lowerCAmelCase: Any = comment
if self.last_value is None:
__lowerCAmelCase: List[Any] = time.time()
__lowerCAmelCase: Any = value
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = self.warmup
__lowerCAmelCase: List[str] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase: Union[str, Any] = time.time()
__lowerCAmelCase: str = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase: int = None
if value >= self.total:
__lowerCAmelCase: Any = self.total
__lowerCAmelCase: str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__lowerCAmelCase: Tuple = value
__lowerCAmelCase: int = current_time
if self.average_time_per_item is None:
__lowerCAmelCase: Optional[int] = 1
else:
__lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
__lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase: Any = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any:
super().__init__(UpperCAmelCase )
__lowerCAmelCase: Tuple = None if column_names is None else [column_names]
__lowerCAmelCase: Union[str, Any] = None
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict:
if self.inner_table is None:
__lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase: Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__lowerCAmelCase: List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase: Tuple = None
self.display()
class A_ ( snake_case__ ):
def __init__( self : Any ) -> List[str]:
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = False
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str:
__lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
__lowerCAmelCase: Optional[int] = 0
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
__lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any:
__lowerCAmelCase: Union[str, Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase: Any = False
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]:
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase: Any = None
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase: Union[str, Any] = {'Training Loss': logs['loss']}
# First column is necessarily Step since we're not in epoch eval strategy
__lowerCAmelCase: Dict = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]:
if self.training_tracker is not None:
__lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase: List[str] = log['loss']
break
if self.first_column == "Epoch":
__lowerCAmelCase: int = int(state.epoch )
else:
__lowerCAmelCase: Tuple = state.global_step
__lowerCAmelCase: Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
__lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , UpperCAmelCase )
__lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase )
__lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase )
__lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase )
__lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase )
__lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__lowerCAmelCase: Tuple = v
else:
__lowerCAmelCase: int = k.split('_' )
__lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase: List[Any] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase: List[str] = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase: str = True
def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = None
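# Quick hand-checked examples for the helpers above (note that the mangling
# collapses format_time, html_progress_bar and text_to_html_table to the same
# name _a, so they would shadow one another if this row were executed as-is):
# format_time(75) -> "01:15"
# format_time(3661) -> "1:01:01"
# html_progress_bar(3, 10, "Epoch 1", "[3/10 00:05 < 00:12]") returns an HTML
# snippet with a <progress value='3' max='10'> element for IPython.display.HTML.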
| 322 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_a = logging.get_logger('''transformers.models.speecht5''')
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any ) -> Any:
"""simple docstring"""
hf_model.apply_weight_norm()
__lowerCAmelCase: Optional[Any] = checkpoint['input_conv.weight_g']
__lowerCAmelCase: List[str] = checkpoint['input_conv.weight_v']
__lowerCAmelCase: Any = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
__lowerCAmelCase: List[str] = checkpoint[f'''upsamples.{i}.1.weight_g''']
__lowerCAmelCase: int = checkpoint[f'''upsamples.{i}.1.weight_v''']
__lowerCAmelCase: Union[str, Any] = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__lowerCAmelCase: Tuple = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
__lowerCAmelCase: Optional[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
__lowerCAmelCase: List[str] = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
__lowerCAmelCase: Any = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
__lowerCAmelCase: Optional[Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
__lowerCAmelCase: Union[str, Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
__lowerCAmelCase: int = checkpoint['output_conv.1.weight_g']
__lowerCAmelCase: int = checkpoint['output_conv.1.weight_v']
__lowerCAmelCase: str = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
__lowerCAmelCase: Optional[int] = SpeechTaHifiGanConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = SpeechTaHifiGanConfig()
__lowerCAmelCase: Tuple = SpeechTaHifiGan(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = torch.load(SCREAMING_SNAKE_CASE )
load_weights(orig_checkpoint['model']['generator'] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = np.load(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = stats[0].reshape(-1 )
__lowerCAmelCase: Any = stats[1].reshape(-1 )
__lowerCAmelCase: List[str] = torch.from_numpy(SCREAMING_SNAKE_CASE ).float()
__lowerCAmelCase: Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).float()
model.save_pretrained(SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
_a = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
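# Hedged usage note: an example invocation of the conversion script above.
# The script file name and all paths are hypothetical placeholders.
#
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path ./hifigan/checkpoint.pkl \
#     --stats_path ./hifigan/stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan \
#     --push_to_hub your-username/speecht5-hifigan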
| 322 |
import os
from datetime import datetime as dt
from github import Github
_a = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def _a ( ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: Dict = Github(os.environ['GITHUB_TOKEN'] )
__lowerCAmelCase: Tuple = g.get_repo('huggingface/accelerate' )
__lowerCAmelCase: str = repo.get_issues(state='open' )
for issue in open_issues:
__lowerCAmelCase: Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = comments[0] if len(SCREAMING_SNAKE_CASE ) > 0 else None
__lowerCAmelCase: Tuple = dt.utcnow()
__lowerCAmelCase: Optional[int] = (current_time - issue.updated_at).days
__lowerCAmelCase: str = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
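# Hedged variant: a read-only dry run of the stale-issue sweep above, useful for
# previewing which issues would be touched before granting the token write access.
# It mirrors the thresholds in main() (7/23 idle days, 30-day minimum age).
#
# for issue in repo.get_issues(state="open"):
#     idle = (dt.utcnow() - issue.updated_at).days
#     age = (dt.utcnow() - issue.created_at).days
#     if age >= 30 and idle > 7:
#         print(f"would touch #{issue.number}: idle {idle}d, age {age}d")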
| 322 | 1 |
from __future__ import annotations
from collections import Counter
from random import random
class A_ :
def __init__( self : List[Any] ) -> List[str]:
__lowerCAmelCase: Dict = {}
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str ) -> None:
__lowerCAmelCase: Optional[Any] = {}
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : float ) -> None:
if nodea not in self.connections:
self.add_node(UpperCAmelCase )
if nodea not in self.connections:
self.add_node(UpperCAmelCase )
__lowerCAmelCase: List[Any] = probability
def UpperCAmelCase ( self : List[str] ) -> list[str]:
return list(self.connections )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : str ) -> str:
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , SCREAMING_SNAKE_CASE : int ) -> dict[str, int]:
"""simple docstring"""
__lowerCAmelCase: Tuple = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = Counter(graph.get_nodes() )
__lowerCAmelCase: Any = start
for _ in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Tuple = graph.transition(SCREAMING_SNAKE_CASE )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
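# Hedged usage example for the random-walk helper above (mangled to _a); the
# transition table is illustrative and each row's probabilities sum to 1.
transitions = [
    ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
    ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
    ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
]
# visits = _a("a", transitions, 5000)  # Counter of node visits, dominated by "a"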
| 322 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that the fake head request was actually called
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 322 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[int]:
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCAmelCase: List[str] = tf.cast(UpperCAmelCase , tf.floataa )
__lowerCAmelCase: Tuple = tf.cast(self.warmup_steps , tf.floataa )
__lowerCAmelCase: List[str] = global_step_float / warmup_steps_float
__lowerCAmelCase: List[str] = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 0.9 , SCREAMING_SNAKE_CASE : float = 0.9_9_9 , SCREAMING_SNAKE_CASE : float = 1E-8 , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
__lowerCAmelCase: Optional[int] = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE , decay_schedule_fn=SCREAMING_SNAKE_CASE , warmup_steps=SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
__lowerCAmelCase: List[Any] = AdamWeightDecay(
learning_rate=SCREAMING_SNAKE_CASE , weight_decay_rate=SCREAMING_SNAKE_CASE , beta_1=SCREAMING_SNAKE_CASE , beta_2=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase: Dict = tf.keras.optimizers.Adam(
learning_rate=SCREAMING_SNAKE_CASE , beta_1=SCREAMING_SNAKE_CASE , beta_2=SCREAMING_SNAKE_CASE , epsilon=SCREAMING_SNAKE_CASE , clipnorm=SCREAMING_SNAKE_CASE , global_clipnorm=SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1E-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : str , ) -> int:
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[Any] = weight_decay_rate
__lowerCAmelCase: List[str] = include_in_weight_decay
__lowerCAmelCase: Optional[Any] = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Tuple = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
def __init__( self : int ) -> List[Any]:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: int = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
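# Hedged usage sketch: the factory above is create_optimizer in the unmangled
# transformers source, and the last class is its GradientAccumulator. The
# hyperparameters and the `grads`/`model` names below are illustrative.
#
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )
# accum = GradientAccumulator()
# accum(grads)  # call once per micro-batch to accumulate
# optimizer.apply_gradients(zip(accum.gradients, model.trainable_variables))
# accum.reset()  # zero the accumulated gradients and the step counter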
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 )
__lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] )
__lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
return sub_problem_sol
else:
return 0
__lowerCAmelCase: List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: int = 1 + min([right, diagonal, down] )
__lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase: int = [0]
__lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase: Optional[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1]
__lowerCAmelCase: str = dp_array[row + 1][col + 1]
__lowerCAmelCase: Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = 0
return largest_square_area
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: Tuple = [0] * (cols + 1)
__lowerCAmelCase: Optional[int] = [0] * (cols + 1)
__lowerCAmelCase: str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: int = current_row[col + 1]
__lowerCAmelCase: Union[str, Any] = next_row[col + 1]
__lowerCAmelCase: Any = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 322 |
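# Self-contained sketch of the recurrence shared by the four variants above:
# side[r][c] = 1 + min(side[r][c + 1], side[r + 1][c + 1], side[r + 1][c]) when
# mat[r][c] == 1, so side[r][c] is the side of the largest all-ones square whose
# top-left corner is (r, c). (Despite the "area" in the names, the functions
# above track and return this side length.)
def largest_square_side(mat: list[list[int]]) -> int:
    rows, cols = len(mat), len(mat[0])
    side = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c] == 1:
                side[r][c] = 1 + min(side[r][c + 1], side[r + 1][c + 1], side[r + 1][c])
                best = max(best, side[r][c])
    return best

print(largest_square_side([[1, 1, 0], [1, 1, 1], [0, 1, 1]]))  # 2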
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]=[] ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2
__lowerCAmelCase: str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
return max(SCREAMING_SNAKE_CASE , min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : [int] ) -> int:
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( SCREAMING_SNAKE_CASE : [int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : [int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Tuple = list(SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__lowerCAmelCase: int = clamp_rect(SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase: List[Any] = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__lowerCAmelCase: List[Any] = tile.crop(SCREAMING_SNAKE_CASE )
return tile
def _a ( n : int , d : int ) -> int:
"""Round ``n`` down to the nearest multiple of ``d``."""
remainder = n % d
return n - remainder
class A_ ( snake_case__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 3_5_0 , ) -> Optional[Any]:
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__lowerCAmelCase: Any = image.crop(UpperCAmelCase )
__lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = to_input.size
__lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCAmelCase: int = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
__lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Optional[int] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCAmelCase: int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 7_5 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 5_0 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 1_2_8 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , ) -> str:
__lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCAmelCase: str = math.ceil(image.size[0] / tile_size )
__lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size )
__lowerCAmelCase: Optional[Any] = tcx * tcy
__lowerCAmelCase: Tuple = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler'
__lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa )
__lowerCAmelCase: Optional[Any] = pipe.to('cuda' )
__lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE : Tuple ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 322 | 1 |
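# Hypothetical usage sketch for the tiled upscaler above, following the demo in
# its main function. It assumes the upstream names used there
# (StableDiffusionTiledUpscalePipeline and the stabilityai/stable-diffusion-x4-upscaler
# checkpoint) plus a CUDA device and a local input.jpg; none of these are
# verified here. tile_size and tile_border control the per-tile crop and the
# width of the blended seams.
import torch
from PIL import Image

pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", revision="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
low_res = Image.open("input.jpg").convert("RGB")
result = pipe(image=low_res, prompt="a sharp, detailed photo", noise_level=40, tile_size=128, tile_border=32)
result.save("output_x4.jpg")  # __call__ above returns the stitched PIL image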
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 'autoformer'
_lowercase : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "student_t" , UpperCAmelCase : str = "nll" , UpperCAmelCase : int = 1 , UpperCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase : bool = True , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : int = 6_4 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : int = 3_2 , UpperCAmelCase : str = "gelu" , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : int = 1_0_0 , UpperCAmelCase : float = 0.02 , UpperCAmelCase : bool = True , UpperCAmelCase : Tuple=True , UpperCAmelCase : int = 1_0 , UpperCAmelCase : int = 2_5 , UpperCAmelCase : int = 3 , **UpperCAmelCase : Optional[Any] , ) -> str:
# time series specific configuration
__lowerCAmelCase: Optional[int] = prediction_length
__lowerCAmelCase: Optional[Any] = context_length if context_length is not None else prediction_length
__lowerCAmelCase: Union[str, Any] = distribution_output
__lowerCAmelCase: Tuple = loss
__lowerCAmelCase: Dict = input_size
__lowerCAmelCase: Any = num_time_features
__lowerCAmelCase: Optional[int] = lags_sequence
__lowerCAmelCase: Optional[int] = scaling
__lowerCAmelCase: Union[str, Any] = num_dynamic_real_features
__lowerCAmelCase: List[Any] = num_static_real_features
__lowerCAmelCase: Union[str, Any] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__lowerCAmelCase: List[Any] = cardinality
else:
__lowerCAmelCase: Dict = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__lowerCAmelCase: Optional[int] = embedding_dimension
else:
__lowerCAmelCase: Union[str, Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCAmelCase: Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
__lowerCAmelCase: str = input_size * len(self.lags_sequence ) + self._number_of_features
__lowerCAmelCase: Any = d_model
__lowerCAmelCase: Tuple = encoder_attention_heads
__lowerCAmelCase: int = decoder_attention_heads
__lowerCAmelCase: Union[str, Any] = encoder_ffn_dim
__lowerCAmelCase: Tuple = decoder_ffn_dim
__lowerCAmelCase: Optional[int] = encoder_layers
__lowerCAmelCase: str = decoder_layers
__lowerCAmelCase: Union[str, Any] = dropout
__lowerCAmelCase: Tuple = attention_dropout
__lowerCAmelCase: Tuple = activation_dropout
__lowerCAmelCase: Any = encoder_layerdrop
__lowerCAmelCase: List[str] = decoder_layerdrop
__lowerCAmelCase: List[Any] = activation_function
__lowerCAmelCase: int = init_std
__lowerCAmelCase: Optional[Any] = use_cache
# Autoformer
__lowerCAmelCase: Optional[int] = label_length
__lowerCAmelCase: Optional[int] = moving_average
__lowerCAmelCase: Dict = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 322 |
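# Minimal construction sketch for the configuration above; it assumes the
# upstream class name AutoformerConfig from transformers (the class was renamed
# in this dump), and the hyperparameter values are illustrative.
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,   # forecast horizon
    context_length=48,      # conditioning window (defaults to prediction_length)
    num_time_features=2,
    lags_sequence=[1, 2, 3],
)
# feature_size is derived in __init__ as input_size * len(lags_sequence)
# plus the _number_of_features property shown above.
print(config.d_model, config.feature_size)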
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowerCAmelCase: Tuple = True
for i in range(1 , s + 1 ):
__lowerCAmelCase: Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowerCAmelCase: Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
__lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowerCAmelCase: Tuple = s - 2 * j
break
return diff
| 322 | 1 |
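# The function above is the classic minimum subset-sum-difference DP: dp[i][j]
# is True when some subset of the first i items sums to j, and the answer is
# s - 2 * j for the largest feasible j <= s // 2. A compact set-based
# equivalent for cross-checking:
def min_partition_difference(arr: list[int]) -> int:
    s = sum(arr)
    reachable = {0}  # all subset sums seen so far
    for x in arr:
        reachable |= {r + x for r in reachable}
    return min(abs(s - 2 * j) for j in reachable)

print(min_partition_difference([1, 6, 11, 5]))  # 1  ({1, 5, 6} vs {11})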
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '''▁'''
_a = {'''vocab_file''': '''prophetnet.tokenizer'''}
_a = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_a = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_a = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2,
}
def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as reader:
__lowerCAmelCase: Any = reader.readlines()
for index, token in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[Any] = token.rstrip('\n' )
__lowerCAmelCase: int = index
return vocab
class A_ ( snake_case__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Tuple="[SEP]" , UpperCAmelCase : Optional[int]="[SEP]" , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : Optional[int]="[PAD]" , UpperCAmelCase : int="[CLS]" , UpperCAmelCase : int="[MASK]" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : str , ) -> None:
__lowerCAmelCase: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__lowerCAmelCase: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
__lowerCAmelCase: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__lowerCAmelCase: Tuple = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(1_0 ):
__lowerCAmelCase: Optional[int] = F'''[unused{i}]'''
__lowerCAmelCase: List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__lowerCAmelCase: List[Any] = 1_2
__lowerCAmelCase: Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase )
def __getstate__( self : Optional[int] ) -> int:
__lowerCAmelCase: Tuple = self.__dict__.copy()
__lowerCAmelCase: Tuple = None
return state
def __setstate__( self : int , UpperCAmelCase : List[Any] ) -> List[Any]:
__lowerCAmelCase: List[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCAmelCase: Dict = {}
__lowerCAmelCase: Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return ([0] * len(UpperCAmelCase )) + [1]
return ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self : Any ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str ) -> str:
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase: Optional[int] = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : int ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Any = ''.join(UpperCAmelCase ).replace(UpperCAmelCase , ' ' ).strip()
return out_string
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase: Tuple = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
__lowerCAmelCase: List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__lowerCAmelCase: List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 322 |
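# Hypothetical usage of the tokenizer above, assuming the upstream class name
# XLMProphetNetTokenizer from transformers (it matches the checkpoint id in
# PRETRAINED_VOCAB_FILES_MAP); requires the sentencepiece package mentioned in
# the import guard.
from transformers import XLMProphetNetTokenizer

tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
enc = tok("Hello world")
print(enc["input_ids"])  # ends with the [SEP] id, per build_inputs_with_special_tokens
print(tok.convert_ids_to_tokens(enc["input_ids"]))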
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__lowerCAmelCase: Tuple = i + 1
else:
__lowerCAmelCase: List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 | 1 |
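# The scan above is only correct because nums is sorted ascending; for unsorted
# input you would sort first (losing the original indices) or use a hash map.
# A hash-map variant for comparison, O(n) time and space:
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index of first occurrence
    for i, x in enumerate(nums):
        if target - x in seen:
            return [seen[target - x], i]
        seen[x] = i
    return []

print(two_sum_hashmap([11, 2, 15, 7], 9))  # [1, 3], works despite unsorted input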
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class A_ :
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[Any]=9_9 , UpperCAmelCase : Union[str, Any]=3_2 , UpperCAmelCase : Optional[Any]=5 , UpperCAmelCase : Any=4 , UpperCAmelCase : Tuple=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : Dict=3 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Tuple=None , ) -> Union[str, Any]:
__lowerCAmelCase: Any = parent
__lowerCAmelCase: Optional[Any] = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: int = is_training
__lowerCAmelCase: Optional[Any] = use_input_mask
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Union[str, Any] = use_labels
__lowerCAmelCase: Dict = vocab_size
__lowerCAmelCase: List[str] = hidden_size
__lowerCAmelCase: Dict = num_hidden_layers
__lowerCAmelCase: List[Any] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Dict = hidden_act
__lowerCAmelCase: Optional[Any] = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: List[str] = max_position_embeddings
__lowerCAmelCase: int = type_vocab_size
__lowerCAmelCase: str = type_sequence_label_size
__lowerCAmelCase: List[Any] = initializer_range
__lowerCAmelCase: Dict = num_labels
__lowerCAmelCase: Tuple = num_choices
__lowerCAmelCase: Tuple = scope
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: Dict = None
if self.use_input_mask:
__lowerCAmelCase: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Union[str, Any] = None
__lowerCAmelCase: Tuple = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase , )
def UpperCAmelCase ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] ) -> Dict:
__lowerCAmelCase: List[str] = FalconModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , ) -> Dict:
__lowerCAmelCase: str = True
__lowerCAmelCase: Optional[int] = FalconModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , ) -> Union[str, Any]:
__lowerCAmelCase: int = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , ) -> Optional[Any]:
__lowerCAmelCase: str = True
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: List[str] = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
__lowerCAmelCase: Optional[int] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase: Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase: Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase: Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase: str = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
# select random slice
__lowerCAmelCase: List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase: List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase: Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
__lowerCAmelCase: Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[str] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowercase : Any = (FalconForCausalLM,) if is_torch_available() else ()
_lowercase : Dict = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : Optional[int] = False
def UpperCAmelCase ( self : Dict ) -> Tuple:
__lowerCAmelCase: List[Any] = FalconModelTester(self )
__lowerCAmelCase: Dict = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Optional[int] ) -> str:
__lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase , *__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__lowerCAmelCase: Dict = alibi
self.model_tester.create_and_check_model(UpperCAmelCase , *UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Tuple = 3
__lowerCAmelCase: Union[str, Any] = input_dict['input_ids']
__lowerCAmelCase: Optional[Any] = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase: Dict = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: List[Any] = 3
__lowerCAmelCase: List[str] = 'single_label_classification'
__lowerCAmelCase: List[str] = input_dict['input_ids']
__lowerCAmelCase: Optional[int] = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase: str = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : List[str] ) -> str:
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Union[str, Any] = input_dict['input_ids']
__lowerCAmelCase: Dict = FalconForCausalLM(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , use_cache=UpperCAmelCase )
__lowerCAmelCase: Dict = input_ids.shape[0]
__lowerCAmelCase: Dict = model._convert_to_rw_cache(result.past_key_values )
__lowerCAmelCase: List[str] = model._convert_cache_to_standard_format(UpperCAmelCase , UpperCAmelCase )
for layer in range(len(UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: int = 3
__lowerCAmelCase: str = 'multi_label_classification'
__lowerCAmelCase: Dict = input_dict['input_ids']
__lowerCAmelCase: str = input_ids.ne(1 ).to(UpperCAmelCase )
__lowerCAmelCase: int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase: Optional[int] = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase , 'use_cache' ):
return
__lowerCAmelCase: Tuple = model_class(UpperCAmelCase ).to(UpperCAmelCase )
if "use_cache" not in inputs:
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: int = model(**UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__lowerCAmelCase: List[Any] = (
getattr(UpperCAmelCase , 'decoder_layers' , UpperCAmelCase )
or getattr(UpperCAmelCase , 'num_decoder_layers' , UpperCAmelCase )
or config.num_hidden_layers
)
__lowerCAmelCase: int = getattr(UpperCAmelCase , 'num_kv_heads' , config.num_attention_heads )
__lowerCAmelCase: List[str] = getattr(UpperCAmelCase , 'd_model' , config.hidden_size )
__lowerCAmelCase: List[Any] = embed_dim // num_attention_heads
__lowerCAmelCase: Any = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = inputs['input_ids'].shape
for i in range(UpperCAmelCase ):
if config.new_decoder_architecture:
__lowerCAmelCase: int = config.num_attention_heads
elif config.multi_query:
__lowerCAmelCase: Dict = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Optional[int] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
__lowerCAmelCase: str = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase )
__lowerCAmelCase: str = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
__lowerCAmelCase: Tuple = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=1_9 )
__lowerCAmelCase: Dict = tokenizer.batch_decode(UpperCAmelCase )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__lowerCAmelCase: str = AutoTokenizer.from_pretrained(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def UpperCAmelCase ( self : Any ) -> int:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__lowerCAmelCase: Tuple = AutoTokenizer.from_pretrained(UpperCAmelCase )
__lowerCAmelCase: Dict = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(device=UpperCAmelCase )
__lowerCAmelCase: int = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# Test results are the same with and without cache
__lowerCAmelCase: Any = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=2_0 , use_cache=UpperCAmelCase )
__lowerCAmelCase: List[str] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=2_0 , use_cache=UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 322 |
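# The suite above follows the transformers testing conventions (ModelTesterMixin,
# ConfigTester, GenerationTesterMixin); in the upstream repository layout it
# would be collected with pytest, and the @slow integration tests only run when
# the RUN_SLOW environment variable is set. Illustrative invocation (the test
# path is an assumption, not taken from this file):
#   RUN_SLOW=1 python -m pytest tests/models/falcon/test_modeling_falcon.py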
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a = '''scheduler_config.json'''
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 1
_lowercase : Tuple = 2
_lowercase : Dict = 3
_lowercase : int = 4
_lowercase : Optional[Any] = 5
@dataclass
class A_ ( snake_case__ ):
_lowercase : jnp.ndarray
class A_ :
_lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
_lowercase : Dict = ['dtype']
_lowercase : int = []
_lowercase : Union[str, Any] = True
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
__lowerCAmelCase: Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : str ) -> Dict:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls : Optional[int] ) -> Any:
__lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' )[0] )
__lowerCAmelCase: Dict = [
getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase )
]
return compatible_classes
def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(SCREAMING_SNAKE_CASE : str ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowerCAmelCase: str = []
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class A_ :
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any:
__lowerCAmelCase: str = scheduler.config
if config.trained_betas is not None:
__lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: List[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
__lowerCAmelCase: Optional[Any] = 1.0 - betas
__lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 )
return cls(
alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , )
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = state.alphas_cumprod
__lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5
__lowerCAmelCase: Any = sqrt_alpha_prod.flatten()
__lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
__lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten()
__lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 322 | 1 |
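# A small numeric sketch of the q-sampling rule that get_sqrt_alpha_prod and
# add_noise above implement: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# Plain jnp arrays stand in for CommonSchedulerState; the beta range and shapes
# are illustrative.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 1000)            # the "linear" schedule branch above
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
x0 = jnp.ones((4,))
eps = jnp.zeros((4,))  # stand-in noise; normally drawn with jax.random.normal
t = 500
xt = jnp.sqrt(alphas_cumprod[t]) * x0 + jnp.sqrt(1.0 - alphas_cumprod[t]) * eps
print(xt)  # shrinks toward zero as t grows, matching the cumulative product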
import numpy as np
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ) -> Dict:
"""simple docstring"""
__lowerCAmelCase: int = int(np.ceil((x_end - xa) / h ) )
__lowerCAmelCase: Union[str, Any] = np.zeros((n + 1,) )
__lowerCAmelCase: Any = ya
__lowerCAmelCase: Tuple = xa
for k in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[int] = f(SCREAMING_SNAKE_CASE , y[k] )
__lowerCAmelCase: Union[str, Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCAmelCase: Any = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCAmelCase: str = f(x + h , y[k] + h * ka )
__lowerCAmelCase: str = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 |
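# Quick check of the classic RK4 update the function above implements, run on
# y' = y with y(0) = 1 (exact solution e^x). Written standalone because the
# def above lost its parameter names in this dump.
import numpy as np

def rk4(f, y0, x0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y

print(rk4(lambda x, y: y, 1.0, 0.0, 1.0, 0.1)[-1])  # ~2.7183, vs e = 2.71828...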
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 322 | 1 |
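# Both helpers above use BFS, so their shortest-path guarantees hold only for
# unweighted graphs; with edge weights you would switch to Dijkstra. A minimal
# heapq-based sketch on a small illustrative graph (unit weights here, so it
# agrees with the BFS distance):
import heapq

def dijkstra(graph: dict, start: str, target: str) -> int:
    dist = {start: 0}
    heap = [(0, start)]
    while heap:
        d, node = heapq.heappop(heap)
        if node == target:
            return d
        for nxt in graph[node]:
            if d + 1 < dist.get(nxt, float("inf")):  # unit weight per edge
                dist[nxt] = d + 1
                heapq.heappush(heap, (d + 1, nxt))
    return -1

print(dijkstra({"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}, "A", "D"))  # 2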
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A_ ( snake_case__ ):
_lowercase : Dict = 'wav2vec2'
def __init__( self : str , UpperCAmelCase : Any=3_2 , UpperCAmelCase : Optional[Any]=7_6_8 , UpperCAmelCase : Any=1_2 , UpperCAmelCase : Optional[int]=1_2 , UpperCAmelCase : Any=3_0_7_2 , UpperCAmelCase : Optional[Any]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : str=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : Dict=1E-5 , UpperCAmelCase : Optional[Any]="group" , UpperCAmelCase : Union[str, Any]="gelu" , UpperCAmelCase : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Union[str, Any]=1_2_8 , UpperCAmelCase : List[str]=1_6 , UpperCAmelCase : str=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=0.05 , UpperCAmelCase : List[str]=1_0 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Union[str, Any]=1_0 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : List[Any]=3_2_0 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Union[str, Any]=1_0_0 , UpperCAmelCase : Optional[Any]=2_5_6 , UpperCAmelCase : Optional[int]=2_5_6 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Dict="sum" , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : int=False , UpperCAmelCase : Any=2_5_6 , UpperCAmelCase : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase : Tuple=(5, 3, 3, 1, 1) , UpperCAmelCase : Tuple=(1, 2, 3, 1, 1) , UpperCAmelCase : Optional[Any]=5_1_2 , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Any=2 , UpperCAmelCase : int=False , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = hidden_size
__lowerCAmelCase: Any = feat_extract_norm
__lowerCAmelCase: Dict = feat_extract_activation
__lowerCAmelCase: Any = list(UpperCAmelCase )
__lowerCAmelCase: int = list(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = list(UpperCAmelCase )
__lowerCAmelCase: Tuple = conv_bias
__lowerCAmelCase: List[str] = num_conv_pos_embeddings
__lowerCAmelCase: Union[str, Any] = num_conv_pos_embedding_groups
__lowerCAmelCase: List[Any] = len(self.conv_dim )
__lowerCAmelCase: str = num_hidden_layers
__lowerCAmelCase: Tuple = intermediate_size
__lowerCAmelCase: List[Any] = hidden_act
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: Optional[int] = hidden_dropout
__lowerCAmelCase: Union[str, Any] = attention_dropout
__lowerCAmelCase: List[str] = activation_dropout
__lowerCAmelCase: Union[str, Any] = feat_proj_dropout
__lowerCAmelCase: Optional[Any] = final_dropout
__lowerCAmelCase: Optional[Any] = layerdrop
__lowerCAmelCase: Optional[int] = layer_norm_eps
__lowerCAmelCase: Optional[Any] = initializer_range
__lowerCAmelCase: Any = vocab_size
__lowerCAmelCase: Dict = do_stable_layer_norm
__lowerCAmelCase: Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase: List[str] = apply_spec_augment
__lowerCAmelCase: List[str] = mask_time_prob
__lowerCAmelCase: List[Any] = mask_time_length
__lowerCAmelCase: Union[str, Any] = mask_time_min_masks
__lowerCAmelCase: Tuple = mask_feature_prob
__lowerCAmelCase: Optional[int] = mask_feature_length
__lowerCAmelCase: str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase: Union[str, Any] = num_codevectors_per_group
__lowerCAmelCase: Dict = num_codevector_groups
__lowerCAmelCase: List[str] = contrastive_logits_temperature
__lowerCAmelCase: Optional[Any] = feat_quantizer_dropout
__lowerCAmelCase: Tuple = num_negatives
__lowerCAmelCase: Tuple = codevector_dim
__lowerCAmelCase: List[Any] = proj_codevector_dim
__lowerCAmelCase: List[str] = diversity_loss_weight
# ctc loss
__lowerCAmelCase: Union[str, Any] = ctc_loss_reduction
__lowerCAmelCase: Union[str, Any] = ctc_zero_infinity
# adapter
__lowerCAmelCase: Tuple = add_adapter
__lowerCAmelCase: Any = adapter_kernel_size
__lowerCAmelCase: List[str] = adapter_stride
__lowerCAmelCase: Optional[Any] = num_adapter_layers
__lowerCAmelCase: Union[str, Any] = output_hidden_size or hidden_size
__lowerCAmelCase: Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase: List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase: List[str] = list(UpperCAmelCase )
__lowerCAmelCase: List[str] = list(UpperCAmelCase )
__lowerCAmelCase: Dict = list(UpperCAmelCase )
__lowerCAmelCase: Dict = xvector_output_dim
@property
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
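# Illustrative sketch (not part of the original file): the property above multiplies the
# feature-encoder strides together, giving the model's overall downsampling factor.
# The stride tuple below is an assumption matching the default in __init__ above.
import functools
import operator

_example_conv_stride = (5, 2, 2, 2, 2, 2, 2)
# 5 * 2**6 == 320, i.e. one output frame per 320 raw audio samples (~20 ms at 16 kHz)
assert functools.reduce(operator.mul, _example_conv_stride, 1) == 320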
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( snake_case__ ):
_lowercase : int = ['image_processor', 'tokenizer']
_lowercase : Union[str, Any] = 'LayoutLMv3ImageProcessor'
_lowercase : List[str] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Any , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[Any] ) -> str:
__lowerCAmelCase: str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase , )
__lowerCAmelCase: List[Any] = kwargs.pop('feature_extractor' )
__lowerCAmelCase: Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
__lowerCAmelCase: str = self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCAmelCase: List[str] = features['words']
__lowerCAmelCase: List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
__lowerCAmelCase: Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowerCAmelCase: int = self.get_overflowing_images(UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowerCAmelCase: str = images
return encoded_inputs
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowerCAmelCase: str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' )
return images_with_overflow
def UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Any , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> List[str]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase , )
return self.image_processor
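# Usage sketch (illustrative; not from the original file -- the model name and image path
# are assumptions). With the default apply_ocr=True, a single call runs OCR, tokenizes the
# recognized words, and returns pixel values alongside the text inputs:
#
#   from PIL import Image
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained('microsoft/layoutlmv3-base')
#   image = Image.open('document.png').convert('RGB')
#   encoding = processor(image, return_tensors='pt')
#   # encoding contains: input_ids, attention_mask, bbox, pixel_values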
| 322 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class A_ :
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : Any=2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : List[Any]=1_6 , UpperCAmelCase : List[Any]=[1, 2, 1] , UpperCAmelCase : Tuple=[2, 2, 4] , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=2.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-5 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=1_0 , UpperCAmelCase : Tuple=8 , UpperCAmelCase : Tuple=["stage1", "stage2", "stage3"] , UpperCAmelCase : List[str]=[1, 2, 3] , ) -> List[str]:
__lowerCAmelCase: Any = parent
__lowerCAmelCase: List[Any] = batch_size
__lowerCAmelCase: int = image_size
__lowerCAmelCase: int = patch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: List[str] = embed_dim
__lowerCAmelCase: int = depths
__lowerCAmelCase: Tuple = num_heads
__lowerCAmelCase: List[Any] = window_size
__lowerCAmelCase: Dict = mlp_ratio
__lowerCAmelCase: Union[str, Any] = qkv_bias
__lowerCAmelCase: Optional[int] = hidden_dropout_prob
__lowerCAmelCase: Dict = attention_probs_dropout_prob
__lowerCAmelCase: Any = drop_path_rate
__lowerCAmelCase: Optional[int] = hidden_act
__lowerCAmelCase: Optional[int] = use_absolute_embeddings
__lowerCAmelCase: str = patch_norm
__lowerCAmelCase: Optional[int] = layer_norm_eps
__lowerCAmelCase: Optional[int] = initializer_range
__lowerCAmelCase: str = is_training
__lowerCAmelCase: Any = scope
__lowerCAmelCase: Union[str, Any] = use_labels
__lowerCAmelCase: Any = type_sequence_label_size
__lowerCAmelCase: int = encoder_stride
__lowerCAmelCase: Dict = out_features
__lowerCAmelCase: int = out_indices
def UpperCAmelCase ( self : int ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Any:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] ) -> int:
__lowerCAmelCase: Tuple = MaskFormerSwinModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase: Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
__lowerCAmelCase: List[str] = MaskFormerSwinBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = ['stem']
__lowerCAmelCase: Optional[Any] = MaskFormerSwinBackbone(config=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_lowercase : Any = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_lowercase : Optional[int] = False
_lowercase : int = False
_lowercase : Optional[Any] = False
_lowercase : List[Any] = False
_lowercase : Any = False
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: List[Any] = MaskFormerSwinModelTester(self )
__lowerCAmelCase: List[str] = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : Any ) -> Any:
return
def UpperCAmelCase ( self : Tuple ) -> Dict:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase ( self : List[str] ) -> str:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: Union[str, Any] = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self : str ) -> int:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: str = model_class(UpperCAmelCase )
__lowerCAmelCase: str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase: Tuple = [*signature.parameters.keys()]
__lowerCAmelCase: int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
pass
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ) -> Any:
__lowerCAmelCase: Optional[Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCAmelCase: Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__lowerCAmelCase: Optional[int] = outputs.hidden_states
__lowerCAmelCase: str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# Swin has a different seq_length
__lowerCAmelCase: int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase: Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase: Dict = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase: Optional[int] = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : List[str] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: List[Any] = 3
__lowerCAmelCase: List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase: Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase: List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase: str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase: Optional[int] = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase: Any = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase ( self : str ) -> Any:
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCAmelCase : List[Any] ):
            t[t != t] = 0  # NaN != NaN, so this zeroes out any NaN entries in place
return t
def check_equivalence(UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict={} ):
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
__lowerCAmelCase: List[str] = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
def recursive_check(UpperCAmelCase : int , UpperCAmelCase : List[Any] ):
if isinstance(UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            F''' {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase ).any()}. Dict has'''
                            F''' `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase ).any()}.'''
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase )
for model_class in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
__lowerCAmelCase: Tuple = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
@require_torch
class A_ ( unittest.TestCase , snake_case__ ):
_lowercase : Tuple = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_lowercase : int = MaskFormerSwinConfig
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: Tuple = MaskFormerSwinModelTester(self )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCAmelCase: Dict = backbone_class(UpperCAmelCase )
backbone.to(UpperCAmelCase )
backbone.eval()
__lowerCAmelCase: Tuple = backbone(**UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCAmelCase: Any = backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Dict = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCAmelCase: Optional[int] = backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
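# Minimal usage sketch (illustrative, not part of the test file above; all sizes are
# assumptions). Building the backbone from a small config and checking its feature maps:
#
#   import torch
#   from transformers import MaskFormerSwinConfig, MaskFormerSwinBackbone
#
#   config = MaskFormerSwinConfig(image_size=64, out_features=['stage1', 'stage2', 'stage3'])
#   backbone = MaskFormerSwinBackbone(config).eval()
#   with torch.no_grad():
#       outputs = backbone(torch.randn(1, 3, 64, 64))
#   for fmap, n_channels in zip(outputs.feature_maps, backbone.channels):
#       assert fmap.shape[1] == n_channels  # feature maps are (batch, channels, H, W)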
| 322 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_a = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : tuple , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=False , ) -> str:
"""simple docstring"""
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
else:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowerCAmelCase: str = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__lowerCAmelCase: Dict = 'cpu'
__lowerCAmelCase: Optional[int] = Path(SCREAMING_SNAKE_CASE )
# VAE DECODER
__lowerCAmelCase: Optional[Any] = AutoencoderKL.from_pretrained(model_path + '/vae' )
__lowerCAmelCase: Union[str, Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowerCAmelCase: Any = vae_decoder.decode
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
_a = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
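# Example invocation (illustration only; the script name and paths are hypothetical):
#
#   python convert_vae_decoder_to_onnx.py \
#       --model_path ./stable-diffusion-checkpoint \
#       --output_path ./onnx-out \
#       --opset 14 --fp16
#
# This writes the decoder-only graph to ./onnx-out/vae_decoder/model.onnx; --fp16
# requires a CUDA device, as enforced by the float16 check above.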
| 322 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class A_ ( snake_case__ ):
_lowercase : Union[str, Any] = 'speech_to_text_2'
_lowercase : List[Any] = ['past_key_values']
_lowercase : Any = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[str] , UpperCAmelCase : List[Any]=1_0_0_0_0 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : Optional[Any]=2_0_4_8 , UpperCAmelCase : int=4 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : Optional[Any]=2_5_6 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : int=0.02 , UpperCAmelCase : Any=2 , UpperCAmelCase : int=True , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : Any=2 , UpperCAmelCase : Tuple=1_0_2_4 , **UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[Any] = vocab_size
__lowerCAmelCase: List[str] = d_model
__lowerCAmelCase: List[str] = decoder_ffn_dim
__lowerCAmelCase: Optional[Any] = decoder_layers
__lowerCAmelCase: Optional[Any] = decoder_attention_heads
__lowerCAmelCase: List[Any] = dropout
__lowerCAmelCase: Union[str, Any] = attention_dropout
__lowerCAmelCase: str = activation_dropout
__lowerCAmelCase: str = activation_function
__lowerCAmelCase: Tuple = init_std
__lowerCAmelCase: Optional[Any] = decoder_layerdrop
__lowerCAmelCase: str = use_cache
__lowerCAmelCase: Dict = decoder_layers
__lowerCAmelCase: List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase: str = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , **UpperCAmelCase , )
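# Illustrative sketch (not part of the original file): the attribute_map declared above
# lets generic code read `hidden_size` and `num_attention_heads` even though this config
# stores them as `d_model` and `decoder_attention_heads`. Upstream class name assumed:
#
#   config = Speech2Text2Config(d_model=512, decoder_attention_heads=8)
#   assert config.hidden_size == 512
#   assert config.num_attention_heads == 8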
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 )
__lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] )
__lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
return sub_problem_sol
else:
return 0
__lowerCAmelCase: List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: int = 1 + min([right, diagonal, down] )
__lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase: int = [0]
__lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase: Optional[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1]
__lowerCAmelCase: str = dp_array[row + 1][col + 1]
__lowerCAmelCase: Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = 0
return largest_square_area
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: Tuple = [0] * (cols + 1)
__lowerCAmelCase: Optional[int] = [0] * (cols + 1)
__lowerCAmelCase: str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: int = current_row[col + 1]
__lowerCAmelCase: Union[str, Any] = next_row[col + 1]
__lowerCAmelCase: Any = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
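# Worked trace (illustration, not from the original file): for the 2x2 all-ones matrix,
# the bottom-up DP fills dp[r][c] = 1 + min(right, diagonal, down):
#
#   dp[1][1] = 1,  dp[1][0] = 1,  dp[0][1] = 1,  dp[0][0] = 1 + min(1, 1, 1) = 2
#
# so the call above prints 2. Note that despite the 'area' in the names, the value
# returned is the side length of the largest all-ones square (the area would be 4).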
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__lowerCAmelCase: str = 1
__lowerCAmelCase: str = 2
while i * i <= n:
__lowerCAmelCase: List[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _a ( ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase: Any = 1
__lowerCAmelCase: List[str] = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
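# Worked example (illustration, not from the original file): 28 = 2**2 * 7, so the
# multiplicity formula above gives (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
# Re-implemented inline for a self-contained check:
_n, _i, _divs = 28, 2, 1
while _i * _i <= _n:
    _m = 0
    while _n % _i == 0:
        _n //= _i
        _m += 1
    _divs *= _m + 1
    _i += 1
if _n > 1:
    _divs *= 2
assert _divs == 6
# solution() scans triangle numbers 1, 3, 6, 10, ... for the first one with more
# than 500 divisors (Project Euler problem 12).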
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = SavedModel()
__lowerCAmelCase: str = []
with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase: Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE )
if strict and len(SCREAMING_SNAKE_CASE ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops ) )
elif len(SCREAMING_SNAKE_CASE ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
_a = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
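# Example invocation (illustration only; the script and model paths are hypothetical):
#
#   python utils/check_tf_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict
#
# The check parses the SavedModel protobuf, gathers every op used by the graph and its
# library functions, and fails if any op is neither in the opset-12 ONNX table nor in
# INTERNAL_OPS.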
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE , x % y )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int = 20 ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[Any] = 1
for i in range(1 , n + 1 ):
__lowerCAmelCase: List[Any] = lcm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 322 |
import math
import qiskit
def _a ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
__lowerCAmelCase: Union[str, Any] = qiskit.QuantumRegister(4 , 'qr' )
__lowerCAmelCase: List[Any] = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
__lowerCAmelCase: Any = [input_a, input_a, carry_in]
__lowerCAmelCase: List[str] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits
__lowerCAmelCase: List[str] = qiskit.Aer.get_backend('aer_simulator' )
__lowerCAmelCase: List[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 322 | 1 |
import datasets
from .evaluate import evaluate
_a = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_a = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_a = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def UpperCAmelCase ( self : int ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> List[str]:
__lowerCAmelCase: Union[str, Any] = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
__lowerCAmelCase: Optional[int] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
__lowerCAmelCase: List[str] = evaluate(dataset=UpperCAmelCase , predictions=UpperCAmelCase )
return score
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase: str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[Any] = bbox[i, j, 3]
__lowerCAmelCase: Tuple = bbox[i, j, 1]
__lowerCAmelCase: Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Any = bbox[i, j, 2]
__lowerCAmelCase: int = bbox[i, j, 0]
__lowerCAmelCase: int = tmp_coordinate
__lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase )
__lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase: int = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase: str = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
__lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
__lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
__lowerCAmelCase: Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any:
__lowerCAmelCase: str = 2
__lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs
__lowerCAmelCase: List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : Tuple = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : Tuple = False
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]:
return True
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict:
__lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: int = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self )
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self : Dict ) -> Tuple:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[str]:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def prepare_img( ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : int ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCAmelCase: Tuple = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values
__lowerCAmelCase: Dict = tf.constant([[1, 2]] )
__lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
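        # expected sequence length: 199 = 2 text tokens + 197 visual tokens
        # (presumably a 14 x 14 patch grid plus one patch [CLS] token for 224 x 224 inputs)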
__lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
__lowerCAmelCase: str = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
| 322 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
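                # added note: the beam-search QA head is trained with explicit
                # start/end position labels, supplied here as zero tensors of
                # shape (batch_size,)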
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
pass
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
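        # added note: with do_sample=False this is greedy decoding, which can
        # lock into a repeating argmax cycle, consistent with the TODO above.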
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Any=7 , UpperCAmelCase : Any=3 , UpperCAmelCase : Optional[Any]=3_0 , UpperCAmelCase : Optional[int]=4_0_0 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=True , UpperCAmelCase : str=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Any=1 / 2_5_5 , UpperCAmelCase : Optional[Any]=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase: List[str] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Optional[int] = batch_size
__lowerCAmelCase: int = num_channels
__lowerCAmelCase: Optional[int] = min_resolution
__lowerCAmelCase: Union[str, Any] = max_resolution
__lowerCAmelCase: Union[str, Any] = do_resize
__lowerCAmelCase: str = size
__lowerCAmelCase: int = do_normalize
__lowerCAmelCase: Any = image_mean
__lowerCAmelCase: str = image_std
__lowerCAmelCase: Tuple = do_rescale
__lowerCAmelCase: Optional[int] = rescale_factor
__lowerCAmelCase: int = do_pad
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=False ) -> Dict:
if not batched:
__lowerCAmelCase: List[str] = image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase: int = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase: Dict = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase: str = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase: str = self.size['shortest_edge']
__lowerCAmelCase: Tuple = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase: Any = self.size['shortest_edge']
__lowerCAmelCase: Optional[int] = self.size['shortest_edge']
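            # worked example (added): with shortest_edge=18, a 30 x 40 (w, h)
            # image resizes to expected_width=18, expected_height=int(18 * 40 / 30) = 24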
else:
__lowerCAmelCase: int = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase: str = max(UpperCAmelCase , key=lambda item : item[0] )[0]
            __lowerCAmelCase: Union[str, Any] = max(UpperCAmelCase , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Tuple = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__lowerCAmelCase: Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCAmelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
def UpperCAmelCase ( self : Dict ) -> int:
# Initialize image_processing
__lowerCAmelCase: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase: Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCAmelCase: Tuple = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self : str ) -> List[Any]:
# Initialize image_processing
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCAmelCase: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase: List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase: Union[str, Any] = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase: str = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self : str ) -> List[str]:
# Initialize image_processing
__lowerCAmelCase: str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCAmelCase: Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase: Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase: Dict = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase: List[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase ( self : str ) -> Tuple:
# prepare image and target
__lowerCAmelCase: List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase: Dict = json.loads(f.read() )
__lowerCAmelCase: int = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
__lowerCAmelCase: Dict = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
__lowerCAmelCase: Optional[Any] = image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase: Union[str, Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase , atol=1E-4 ) )
# verify area
__lowerCAmelCase: List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase ) )
# verify boxes
__lowerCAmelCase: Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase: List[Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase ) )
# verify is_crowd
__lowerCAmelCase: Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase ) )
# verify class_labels
__lowerCAmelCase: Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase ) )
# verify orig_size
__lowerCAmelCase: int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase ) )
# verify size
__lowerCAmelCase: Any = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase ) )
@slow
def UpperCAmelCase ( self : str ) -> Tuple:
# prepare image, target and masks_path
__lowerCAmelCase: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase: Optional[int] = json.loads(f.read() )
__lowerCAmelCase: Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__lowerCAmelCase: Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase: str = ConditionalDetrImageProcessor(format='coco_panoptic' )
__lowerCAmelCase: Optional[Any] = image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , masks_path=UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase: List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase , atol=1E-4 ) )
# verify area
__lowerCAmelCase: List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase ) )
# verify boxes
__lowerCAmelCase: List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase: Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase ) )
# verify is_crowd
__lowerCAmelCase: int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase ) )
# verify class_labels
__lowerCAmelCase: Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase ) )
# verify masks
__lowerCAmelCase: Optional[int] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCAmelCase )
# verify orig_size
__lowerCAmelCase: Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase ) )
# verify size
__lowerCAmelCase: Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase ) )
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
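# Note (added): every value above deliberately differs from the PretrainedConfig
# default, so the default-detection test below can flag any key that drifts
# back to its default value.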
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 322 | 1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that uses every edge reachable from ``u`` exactly once."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the edge in both directions, since the graph is undirected
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Return (1, _) for an Euler circuit, (2, odd_node) for an Euler path, (3, _) for neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Classify the graph and print an Euler cycle or path if one exists."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
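# Illustrative trace (added note): g1 above has exactly two odd-degree vertices,
# 1 and 5, so check_circuit_or_path(g1, 10) returns (2, 5) and check_euler(g1, 10)
# prints an Euler path that starts at vertex 5.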
| 322 |
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # slightly faster: process five digits at a time via the lookup table
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True  # 1 is the sole member of the chain that ends in 1
CHAINS[57] = False  # 58 belongs to the chain that ends in 89
def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends in 1 and False if it ends in 89, memoising results."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_00_00_00) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 322 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 5_1_4}
PRETRAINED_INIT_CONFIGURATION = {}
class A_ ( snake_case__ ):
_lowercase : List[str] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[Any] = HerbertTokenizer
def __init__( self : Optional[Any] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Tuple="<mask>" , UpperCAmelCase : List[Any]="</s>" , **UpperCAmelCase : Union[str, Any] , ) -> int:
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: Any = [self.cls_token_id]
__lowerCAmelCase: Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: List[str] = [self.sep_token_id]
__lowerCAmelCase: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase: List[str] = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
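# A hedged usage sketch (annotation, not part of the original file): the
# upstream class corresponding to A_ here is HerbertTokenizerFast. Left
# commented out because it needs network access to the Hugging Face Hub:
# from transformers import HerbertTokenizerFast
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# encoded = tokenizer("Zdanie pierwsze.", "Zdanie drugie.")
# # token_type_ids mark the pair as [0, ..., 0] followed by [1, ..., 1]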
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
__lowerCAmelCase: str = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
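# A minimal, self-contained sketch of the same check (assumption: the function
# above tests whether a number is automorphic, i.e. its square ends in the
# number's own digits).
def is_automorphic(n: int) -> bool:
    # an automorphic number's square ends in the number itself
    return n >= 0 and str(n * n).endswith(str(n))

# 5**2 = 25, 6**2 = 36, 76**2 = 5776 are automorphic; 7**2 = 49 is not
assert [is_automorphic(k) for k in (5, 6, 7, 76)] == [True, True, False, True]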
| 322 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class A_ ( snake_case__ ):
def __lt__( self : List[Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return self[-1] < other[-1]
def __eq__( self : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
return self[-1] == other[-1]
def _a ( SCREAMING_SNAKE_CASE : list ) -> list:
"""simple docstring"""
__lowerCAmelCase: list[Stack] = []
# sort into stacks
for element in collection:
__lowerCAmelCase: Tuple = Stack([element] )
__lowerCAmelCase: Union[str, Any] = bisect_left(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if i != len(SCREAMING_SNAKE_CASE ):
stacks[i].append(SCREAMING_SNAKE_CASE )
else:
stacks.append(SCREAMING_SNAKE_CASE )
# use a heap-based merge to combine the stacks efficiently
__lowerCAmelCase: Optional[int] = merge(*(reversed(SCREAMING_SNAKE_CASE ) for stack in stacks) )
return collection
if __name__ == "__main__":
_a = input('''Enter numbers separated by a comma:\n''').strip()
_a = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
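# A quick sanity check, written as a doctest-style comment because the
# surrounding definitions use obfuscated names:
# >>> patience_sort([5, 1, 4, 2, 3])
# [1, 2, 3, 4, 5]
# Patience sort runs in O(n log n): each element costs one binary search over
# the stacks, and the final k-way merge is heap-based.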
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
__lowerCAmelCase,
): Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): check whether language generation is also applicable to the other models
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids tried for generation give pretty bad results; the cause is unclear. The model may simply not be built for auto-regressive inference.
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
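# Annotation (not original code): in the shape checks above, decoding step
# `idx` has processed `min_length + idx + 1` tokens, so each layer's
# attention tensor is (batch_size * num_beam_groups, num_heads, tgt_len,
# src_len) and each hidden state is (batch_size * num_beam_groups, seq_len,
# hidden_size), with tgt_len == src_len for this model.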
| 322 | 1 |
import sys
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase: Any = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = [[0 for x in range(SCREAMING_SNAKE_CASE )] for x in range(SCREAMING_SNAKE_CASE )]
__lowerCAmelCase: Dict = [[0 for x in range(SCREAMING_SNAKE_CASE )] for x in range(SCREAMING_SNAKE_CASE )]
for chain_length in range(2 , SCREAMING_SNAKE_CASE ):
for a in range(1 , n - chain_length + 1 ):
__lowerCAmelCase: Optional[Any] = a + chain_length - 1
__lowerCAmelCase: Union[str, Any] = sys.maxsize
for c in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__lowerCAmelCase: Dict = cost
__lowerCAmelCase: Optional[int] = c
return matrix, sol
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if i == j:
print('A' + str(SCREAMING_SNAKE_CASE ) , end=' ' )
else:
print('(' , end=' ' )
print_optimal_solution(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , optimal_solution[i][j] )
print_optimal_solution(SCREAMING_SNAKE_CASE , optimal_solution[i][j] + 1 , SCREAMING_SNAKE_CASE )
print(')' , end=' ' )
def _a ( ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: str = [30, 35, 15, 5, 10, 20, 25]
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
__lowerCAmelCase , __lowerCAmelCase: Dict = matrix_chain_order(SCREAMING_SNAKE_CASE )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optimal_solution(SCREAMING_SNAKE_CASE , 1 , n - 1 )
if __name__ == "__main__":
main()
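# Cross-check (annotation, not original code): for the dimension array
# [30, 35, 15, 5, 10, 20, 25] the classic CLRS result is a minimum of
# 15125 scalar multiplications, with parenthesisation ((A1(A2A3))((A4A5)A6)).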
| 322 |
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[Any] = 0
__lowerCAmelCase: Optional[int] = len(SCREAMING_SNAKE_CASE )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) <= 1:
return arr, 0
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE ) // 2
__lowerCAmelCase: str = arr[0:mid]
__lowerCAmelCase: int = arr[mid:]
__lowerCAmelCase , __lowerCAmelCase: List[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: int = _count_cross_inversions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: List[str] = []
__lowerCAmelCase: List[str] = 0
while i < len(SCREAMING_SNAKE_CASE ) and j < len(SCREAMING_SNAKE_CASE ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
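# Worked micro-example (annotation): with p = [5, 6], q = [2], the first
# comparison has p[0] > q[0], so both 5 and 6 invert with 2 and
# len(p) - i == 2 inversions are recorded at once.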
num_inversion += len(SCREAMING_SNAKE_CASE ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> int:
"""simple docstring"""
__lowerCAmelCase: List[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: str = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__lowerCAmelCase: Tuple = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
# an empty list should also have zero inversions
__lowerCAmelCase: int = []
__lowerCAmelCase: Any = count_inversions_bf(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Dict = count_inversions_recursive(SCREAMING_SNAKE_CASE )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
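# Complexity note (annotation, not original code): the brute-force counter is
# O(n^2), the merge-based recursive counter is O(n log n), and main() asserts
# that both agree on every test array.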
| 322 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _a ( a :Dict , a :Optional[int] ) -> List[str]:
inspect_dataset(a , a )
a = path + '''.py'''
assert script_name in os.listdir(a )
assert "__pycache__" not in os.listdir(a )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _a ( a :Any , a :Optional[Any] ) -> Union[str, Any]:
inspect_metric(a , a )
a = path + '''.py'''
assert script_name in os.listdir(a )
assert "__pycache__" not in os.listdir(a )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _a ( a :Union[str, Any] , a :List[Any] , a :Optional[Any] ) -> List[str]:
a = get_dataset_config_info(a , config_name=a )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _a ( a :str , a :Tuple , a :Any ) -> Tuple:
with pytest.raises(a ):
get_dataset_config_info(a , config_name=a )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _a ( a :Any , a :Union[str, Any] ) -> Optional[Any]:
a = get_dataset_config_names(a )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _a ( a :Optional[int] , a :Optional[Any] , a :Optional[Any] ) -> List[Any]:
a = get_dataset_infos(a )
assert list(infos.keys() ) == expected_configs
a = expected_configs[0]
assert expected_config in infos
a = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _a ( a :str , a :Union[str, Any] , a :Union[str, Any] ) -> Any:
a = get_dataset_infos(a )
assert expected_config in infos
a = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _a ( a :Dict , a :Any , a :Optional[int] ) -> Tuple:
with pytest.raises(a ):
get_dataset_split_names(a , config_name=a )
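# A hedged usage sketch mirroring the APIs exercised above (network access to
# the Hugging Face Hub is assumed, so it is left commented out):
# from datasets import get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")
# # -> ['train', 'validation']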
| 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
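# A minimal sketch of the config round-trip these tests rely on (assumes the
# `diffusers` package is installed; the directory name is illustrative, so the
# snippet is left commented out):
# from diffusers import DPMSolverSinglestepScheduler
# scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
# scheduler.save_config("/tmp/dpm_singlestep")
# restored = DPMSolverSinglestepScheduler.from_pretrained("/tmp/dpm_singlestep")
# assert restored.config.num_train_timesteps == 1000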
| 322 | 0 |