"""simple docstring"""
A__ : Optional[int] = 8.314_4598
def a__ ( lowerCAmelCase : float , lowerCAmelCase : float ):
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
A__ : List[Any] = 300
A__ : Dict = 28
A__ : Tuple = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : dict[int, int] = {}
UpperCAmelCase__ : Optional[Any] = 2
while True:
UpperCAmelCase__ : Dict = factor_map.pop(lowerCAmelCase , lowerCAmelCase )
if factor:
UpperCAmelCase__ : int = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase__ : Tuple = factor
else:
UpperCAmelCase__ : Union[str, Any] = prime
yield prime
prime += 1
def a__ ( lowerCAmelCase : float = 1E10 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = sieve()
UpperCAmelCase__ : Union[str, Any] = 1
while True:
UpperCAmelCase__ : Tuple = next(lowerCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
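# Illustrative usage of the generator (comment added for clarity):
#
#     from itertools import islice
#     list(islice(sieve(), 8))  # -> [2, 3, 5, 7, 11, 13, 17, 19]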
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A__ : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
A__ : int = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
A__ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
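# Illustrative shapes (comment added for clarity): with the defaults
# (input_size=192, mask_patch_size=32, model_patch_size=4) we get rand_size = 6,
# scale = 8, token_count = 36 and mask_count = ceil(36 * 0.6) = 22, so each call
# returns a flat 0/1 tensor of length (6 * 8) ** 2 = 2304 with 22 * 64 = 1408 ones.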
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
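# Illustrative check (comment added for clarity): solution(30) == 10, counting the
# semiprimes 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26. For each prime p (the `left`
# pointer) the inner loop finds the largest prime q >= p with p * q < max_number,
# so every pair (p, q) is counted exactly once.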
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : List[Any] = {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'xglm'
_A = ['past_key_values']
_A = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCamelCase=25_60_08 , __UpperCamelCase=20_48 , __UpperCamelCase=10_24 , __UpperCamelCase=40_96 , __UpperCamelCase=24 , __UpperCamelCase=16 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , **__UpperCamelCase , )-> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Optional[int] = d_model
UpperCAmelCase__ : List[str] = ffn_dim
UpperCAmelCase__ : Union[str, Any] = num_layers
UpperCAmelCase__ : Union[str, Any] = attention_heads
UpperCAmelCase__ : Optional[int] = activation_function
UpperCAmelCase__ : List[Any] = dropout
UpperCAmelCase__ : List[str] = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : str = layerdrop
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ : Tuple = use_cache
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
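# Illustrative usage (assumption - relies on PretrainedConfig's attribute_map
# aliasing, which is standard transformers behaviour):
#
#     config = XGLMConfig(num_layers=12)
#     config.num_hidden_layers  # -> 12, aliased to `num_layers` via attribute_map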
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
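# Illustrative behaviour of `padding_tensor` (assumption - the exact index
# expressions on the left-hand side were lost in extraction and have been
# reconstructed above):
#
#     padding_tensor([[1, 2], [3]], -1, "right", 3)  # -> [[1, 2, -1], [3, -1, -1]]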
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
def decorator(lowerCAmelCase : Any ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase , "handle_key" , [] )
handle += [key]
setattr(lowerCAmelCase , "handle_key" , lowerCAmelCase )
return func
return decorator
def a__ ( *lowerCAmelCase : List[str] ):
'''simple docstring'''
def decorator(lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Any = getattr(lowerCAmelCase , "handle_key" , [] )
handle += keys
setattr(lowerCAmelCase , "handle_key" , lowerCAmelCase )
return func
return decorator
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __new__( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : List[Any] = super().__new__(cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not hasattr(__UpperCamelCase , "key_handler" ):
setattr(__UpperCamelCase , "key_handler" , {} )
setattr(__UpperCamelCase , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase__ : List[str] = getattr(__UpperCamelCase , "handle_key" , [] )
for key in handled_keys:
UpperCAmelCase__ : List[Any] = value
return new_cls
@staticmethod
def lowerCAmelCase__ ( cls )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase__ : Union[str, Any] = ord(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = cls.key_handler.get(__UpperCamelCase )
if handler:
UpperCAmelCase__ : Optional[int] = char
return handler(cls )
else:
return None
def a__ ( cls : Union[str, Any] ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
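# Illustrative usage (assumption - the key name must exist in KEYMAP; the exact
# set of entries is defined in the sibling keymap module):
#
#     @register
#     class Menu:
#         @mark(KEYMAP["enter"])
#         def select(cls):
#             return "selected"
#
#     Menu.handle_input(Menu)  # reads one key press and dispatches to `select`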
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
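# Illustrative usage (assumption - the feature spec and path are made up):
#
#     features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
#     dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
#     assert len(dataset) == 10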
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A__ : Any = get_logger(__name__)
class _lowercase ( enum.Enum ):
'''simple docstring'''
_A = 'all_checks'
_A = 'basic_checks'
_A = 'no_checks'
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def a__ ( lowerCAmelCase : Optional[dict] , lowerCAmelCase : dict , lowerCAmelCase : int=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) )
if len(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) )
UpperCAmelCase__ : List[str] = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCAmelCase__ : List[str] = " for " + verification_name if verification_name is not None else ""
if len(lowerCAmelCase ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def a__ ( lowerCAmelCase : Optional[dict] , lowerCAmelCase : dict ):
'''simple docstring'''
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) )
if len(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) > 0:
raise UnexpectedSplits(str(set(lowerCAmelCase ) - set(lowerCAmelCase ) ) )
UpperCAmelCase__ : Tuple = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowerCAmelCase ) > 0:
raise NonMatchingSplitsSizesError(str(lowerCAmelCase ) )
logger.info("All the splits matched successfully." )
def a__ ( lowerCAmelCase : str , lowerCAmelCase : bool = True ):
'''simple docstring'''
if record_checksum:
UpperCAmelCase__ : int = shaaaa()
with open(lowerCAmelCase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
m.update(lowerCAmelCase )
UpperCAmelCase__ : int = m.hexdigest()
else:
UpperCAmelCase__ : int = None
return {"num_bytes": os.path.getsize(lowerCAmelCase ), "checksum": checksum}
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
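# Illustrative usage (assumption - the path is made up):
#
#     info = get_size_checksum_dict("/tmp/data.csv")
#     # -> {"num_bytes": <file size in bytes>, "checksum": "<sha256 hexdigest>"}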
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'microsoft/speecht5_tts'
_A = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_A = 'text_reader'
_A = SpeechTaProcessor
_A = SpeechTaForTextToSpeech
_A = SpeechTaHifiGan
_A = ['text']
_A = ['audio']
def lowerCAmelCase__ ( self )-> Tuple:
if self.post_processor is None:
UpperCAmelCase__ : Optional[int] = "microsoft/speecht5_hifigan"
super().setup()
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None )-> Optional[Any]:
UpperCAmelCase__ : Tuple = self.pre_processor(text=__UpperCamelCase , return_tensors="pt" , truncation=__UpperCamelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
UpperCAmelCase__ : Optional[int] = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
UpperCAmelCase__ : Optional[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
with torch.no_grad():
return self.model.generate_speech(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
with torch.no_grad():
return self.post_processor(__UpperCamelCase ).cpu().detach()
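# Illustrative usage (assumption - PipelineTool instances are callable and handle
# setup lazily; the tool is usually instantiated for you by the agents API):
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello world")  # 1-D audio tensor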
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
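# Illustrative reading of the tables above (comment added for clarity): distilling
# a 12-layer teacher into a 3-layer student copies teacher layers
# LAYERS_TO_COPY[12][3] == [0, 6, 11], i.e. the first, a middle and the last layer
# are preserved rather than taking strictly alternating layers.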
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
"""simple docstring"""
from itertools import permutations
def a__ ( lowerCAmelCase : tuple ):
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
UpperCAmelCase__ : Union[str, Any] = [7, 11, 13, 17]
for i, test in enumerate(lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def a__ ( lowerCAmelCase : int = 10 ):
'''simple docstring'''
return sum(
int("".join(map(lowerCAmelCase , lowerCAmelCase ) ) )
for num in permutations(range(lowerCAmelCase ) )
if is_substring_divisible(lowerCAmelCase ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : str = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
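A minimal stand-in for how an `attribute_map` like the one above aliases `hidden_size` to `d_model`; the real `PretrainedConfig` in transformers also redirects attribute writes, which this illustrative toy class skips:
class ConfigWithAliases:
    # maps public attribute names to the names actually stored on the instance
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        # only reached when normal lookup fails, i.e. for aliased names
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

cfg = ConfigWithAliases(d_model=512)
assert cfg.hidden_size == 512  # the alias resolves to d_model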
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 |
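A self-contained sketch of the directory-cache idea behind `_get_dirs` and `ls` above: each file path also registers its ancestor directories, so listings work for intermediate folders (the paths here are made up):
from pathlib import PurePosixPath

files = ["data/train/part0.txt", "data/val/part0.txt", "README.md"]
cache = {}
for name in files:
    cache[name] = {"name": name, "size": None, "type": "file"}
    # register every ancestor directory except the root "."
    for d in list(PurePosixPath(name).parents)[:-1]:
        cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}

assert cache["data/train"]["type"] == "directory"
assert cache["data"]["type"] == "directory"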
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 1 |
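When `num_return_sequences > 1`, the flat list of decoded predictions above is regrouped per input with a `chunks` helper imported from the example's `utils` module; a plausible stand-alone version (the real implementation may differ):
def chunks(lst, n):
    # yield successive n-sized chunks from lst
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

preds = ["a1", "a2", "b1", "b2"]  # 2 inputs x 2 return sequences
assert list(chunks(preds, 2)) == [["a1", "a2"], ["b1", "b2"]]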
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A__ : Optional[int] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=1 )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = tokenizer
UpperCAmelCase__ : Any = dataset
UpperCAmelCase__ : int = len(__UpperCamelCase ) if n_tasks is None else n_tasks
UpperCAmelCase__ : Dict = n_copies
def __iter__( self )-> int:
UpperCAmelCase__ : int = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = start_length
UpperCAmelCase__ : Union[str, Any] = eof_strings
UpperCAmelCase__ : int = tokenizer
def __call__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> str:
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__UpperCamelCase )
def a__ ( lowerCAmelCase : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = re.split("(%s)" % "|".join(lowerCAmelCase ) , lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=20 , **lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = defaultdict(lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(lowerCAmelCase ) ):
with torch.no_grad():
UpperCAmelCase__ : List[str] = batch["ids"].shape[-1]
UpperCAmelCase__ : Dict = accelerator.unwrap_model(lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=lowerCAmelCase , **lowerCAmelCase )
# each task is generated batch_size times
UpperCAmelCase__ : Any = batch["task_id"].repeat(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = accelerator.pad_across_processes(
lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase__ : Optional[Any] = generated_tokens.cpu().numpy()
UpperCAmelCase__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(lowerCAmelCase , lowerCAmelCase ):
gen_token_dict[task].append(lowerCAmelCase )
UpperCAmelCase__ : Tuple = [[] for _ in range(lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase__ : Optional[int] = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
code_gens[task].append(remove_last_block(lowerCAmelCase ) )
return code_gens
def a__ ( ):
'''simple docstring'''
# Setup configuration
UpperCAmelCase__ : List[str] = HfArgumentParser(lowerCAmelCase )
UpperCAmelCase__ : Dict = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase__ : Dict = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase__ : Optional[int] = "false"
if args.num_workers is None:
UpperCAmelCase__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=lowerCAmelCase )
# Load model and tokenizer
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase__ : int = tokenizer.eos_token
UpperCAmelCase__ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase__ : Any = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , lowerCAmelCase , lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase__ : Optional[Any] = load_dataset("openai_humaneval" )
UpperCAmelCase__ : int = load_metric("code_eval" )
UpperCAmelCase__ : Union[str, Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
UpperCAmelCase__ : Union[str, Any] = args.n_samples // args.batch_size
UpperCAmelCase__ : Union[str, Any] = TokenizedDataset(lowerCAmelCase , human_eval["test"] , n_copies=lowerCAmelCase , n_tasks=lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
UpperCAmelCase__ : List[Any] = DataLoader(lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase__ : List[Any] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = accelerator.prepare(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = complete_code(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , n_tasks=lowerCAmelCase , batch_size=args.batch_size , **lowerCAmelCase , )
if accelerator.is_main_process:
UpperCAmelCase__ : Optional[int] = []
for task in tqdm(range(lowerCAmelCase ) ):
UpperCAmelCase__ : List[Any] = human_eval["test"][task]["test"]
UpperCAmelCase__ : Optional[int] = F"check({human_eval['test'][task]['entry_point']})"
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = code_eval_metric.compute(
references=lowerCAmelCase , predictions=lowerCAmelCase , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(lowerCAmelCase , lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 660 |
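A quick check of the split logic inside `remove_last_block` above: the generation is split on the stop strings and everything from the last marker onward is dropped (the sample text is hypothetical):
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
text = "    return x + 1\nprint(test())\n"
parts = re.split("(%s)" % "|".join(map(re.escape, EOF_STRINGS)), text)
assert "".join(parts[:-2]) == "    return x + 1"  # marker and tail removed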
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
        UpperCAmelCase__ : Tuple = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 1 |
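A worked trace of Brian Kernighan's trick for n = 37 (binary 100101): each `n &= n - 1` clears the lowest set bit, so the loop body runs exactly once per set bit:
n, steps = 0b100101, []  # 37, which has three set bits
while n:
    n &= n - 1  # clear the lowest set bit
    steps.append(bin(n))
assert steps == ["0b100100", "0b100000", "0b0"]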
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : Dict = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = 'maskformer-swin'
_A = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCamelCase=2_24 , __UpperCamelCase=4 , __UpperCamelCase=3 , __UpperCamelCase=96 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[3, 6, 12, 24] , __UpperCamelCase=7 , __UpperCamelCase=4.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , )-> Dict:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : List[Any] = embed_dim
UpperCAmelCase__ : Tuple = depths
UpperCAmelCase__ : int = len(__UpperCamelCase )
UpperCAmelCase__ : Dict = num_heads
UpperCAmelCase__ : Any = window_size
UpperCAmelCase__ : List[str] = mlp_ratio
UpperCAmelCase__ : Any = qkv_bias
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : str = use_absolute_embeddings
UpperCAmelCase__ : Tuple = layer_norm_eps
UpperCAmelCase__ : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ : Tuple = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
UpperCAmelCase__ : Union[str, Any] = ["stem"] + [F"stage{idx}" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
| 660 |
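A quick check of the `hidden_size` arithmetic above with the defaults: `embed_dim=96` and four stages give a final channel dimension of 96 * 2**3 = 768:
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ["stem"] + [f"stage{i}" for i in range(1, len(depths) + 1)]
assert hidden_size == 768
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]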
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A__ : List[Any] = logging.get_logger(__name__)
A__ : Any = Dict[str, Any]
A__ : Optional[Any] = List[Prediction]
@add_end_docstrings(lowerCAmelCase_ )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> str:
UpperCAmelCase__ : Any = {}
if "threshold" in kwargs:
UpperCAmelCase__ : Any = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *__UpperCamelCase , **__UpperCamelCase )-> Union[Predictions, List[Prediction]]:
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Any = load_image(__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.IntTensor([[image.height, image.width]] )
UpperCAmelCase__ : Union[str, Any] = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
UpperCAmelCase__ : Dict = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
UpperCAmelCase__ : int = target_size
return inputs
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : List[str] = model_inputs.pop("target_size" )
UpperCAmelCase__ : Union[str, Any] = self.model(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
UpperCAmelCase__ : Optional[int] = model_inputs["bbox"]
return model_outputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0.9 )-> str:
UpperCAmelCase__ : Optional[Any] = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
UpperCAmelCase__ , UpperCAmelCase__ : Any = target_size[0].tolist()
def unnormalize(__UpperCamelCase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
UpperCAmelCase__ , UpperCAmelCase__ : str = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
UpperCAmelCase__ : List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
UpperCAmelCase__ : Optional[int] = [unnormalize(__UpperCamelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
UpperCAmelCase__ : Optional[int] = ["score", "label", "box"]
UpperCAmelCase__ : Tuple = [dict(zip(__UpperCamelCase , __UpperCamelCase ) ) for vals in zip(scores.tolist() , __UpperCamelCase , __UpperCamelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
UpperCAmelCase__ : Dict = self.image_processor.post_process_object_detection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = raw_annotations[0]
UpperCAmelCase__ : str = raw_annotation["scores"]
UpperCAmelCase__ : int = raw_annotation["labels"]
UpperCAmelCase__ : Optional[Any] = raw_annotation["boxes"]
UpperCAmelCase__ : Union[str, Any] = scores.tolist()
UpperCAmelCase__ : Dict = [self.model.config.idalabel[label.item()] for label in labels]
UpperCAmelCase__ : Optional[Any] = [self._get_bounding_box(__UpperCamelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
UpperCAmelCase__ : Any = ["score", "label", "box"]
UpperCAmelCase__ : List[str] = [
dict(zip(__UpperCamelCase , __UpperCamelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = box.int().tolist()
UpperCAmelCase__ : str = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 660 |
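The LayoutLM branch above rescales boxes normalized to 0..1000 back into pixel coordinates; the same arithmetic in isolation (the image size and box values are made up):
height, width = 800, 600  # assumed target image size
bbox = [100, 250, 500, 750]  # box normalized to the 0..1000 grid
xmin = width * bbox[0] / 1000
ymin = height * bbox[1] / 1000
xmax = width * bbox[2] / 1000
ymax = height * bbox[3] / 1000
assert (xmin, ymin, xmax, ymax) == (60.0, 200.0, 300.0, 600.0)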
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
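The core expression shared by both distance functions above, checked on a small example: the distance between (1, 1) and (2, 2) is |1 - 2| + |1 - 2| = 2:
a, b = [1.0, 1.0], [2.0, 2.0]
assert float(sum(abs(x - y) for x, y in zip(a, b))) == 2.0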
"""simple docstring"""
def a__ ( lowerCAmelCase : int = 100_0000 ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = set(range(3 , lowerCAmelCase , 2 ) )
primes.add(2 )
for p in range(3 , lowerCAmelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowerCAmelCase , lowerCAmelCase ) ) )
UpperCAmelCase__ : int = [float(lowerCAmelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCAmelCase , limit + 1 , lowerCAmelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
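A sanity check of the totient-sum idea on a tiny limit: the sum of phi(n) for n = 2..8 counts the reduced proper fractions with denominator at most 8, which is 21:
from math import gcd

limit = 8
total = sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)
assert total == 21  # phi values: 1 + 2 + 2 + 4 + 2 + 6 + 4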
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
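The 6k +/- 1 stepping above means the trial-division loop only tests candidate pairs (6k - 1, 6k + 1); for 97 it inspects just the pair (5, 7):
import math

number = 97
candidates = list(range(5, int(math.sqrt(number) + 1), 6))  # the 6k - 1 values
assert candidates == [5]
assert all(number % i and number % (i + 2) for i in candidates)  # 97 is prime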
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def a__ ( lowerCAmelCase : Callable[[int | float], int | float] , lowerCAmelCase : int | float , lowerCAmelCase : int | float , lowerCAmelCase : int = 100 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = x_start
UpperCAmelCase__ : Optional[Any] = fnc(lowerCAmelCase )
UpperCAmelCase__ : Tuple = 0.0
for _ in range(lowerCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCAmelCase__ : str = (x_end - x_start) / steps + xa
UpperCAmelCase__ : str = fnc(lowerCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCAmelCase__ : Tuple = xa
UpperCAmelCase__ : int = fxa
return length
if __name__ == "__main__":
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
A__ : str = 10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 660 |
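A sanity check of the polyline approximation: for f(x) = x on [0, 1] every segment lies on the curve itself, so the estimate equals sqrt(2) for any step count (a self-contained restatement of the loop above):
import math

def straight_line_length(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length

assert abs(straight_line_length(lambda x: x, 0.0, 1.0) - math.sqrt(2)) < 1e-9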
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
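The expected-sequence-length arithmetic used by the tester above, evaluated with its defaults (image size 30, patch size 2, mask ratio 0.6):
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # +1 for [CLS]
assert (num_patches, seq_length) == (225, 91)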
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : str = logging.get_logger(__name__)
A__ : Any = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
A__ : int = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
A__ : Dict = {
"""abeja/gpt-neox-japanese-2.7b""": 2_048,
}
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
with open(lowerCAmelCase , "r" , encoding="utf-8" ) as f:
UpperCAmelCase__ : Optional[Any] = json.loads(f.read() )
UpperCAmelCase__ : Optional[int] = collections.OrderedDict()
UpperCAmelCase__ : List[str] = collections.OrderedDict()
UpperCAmelCase__ : int = collections.OrderedDict()
with open(lowerCAmelCase , "r" , encoding="utf-8" ) as f:
UpperCAmelCase__ : Dict = f.readlines()
UpperCAmelCase__ : List[str] = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = b
UpperCAmelCase__ : str = idx
for wd in b:
UpperCAmelCase__ : str = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase="<|startoftext|>" , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase=False , **__UpperCamelCase , )-> Dict:
super().__init__(
unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase__ : List[Any] = do_clean_text
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
        # self.vocab also covers character variants unique to Japanese, so it is larger than the raw vocabulary
return len(self.raw_vocab )
def lowerCAmelCase__ ( self )-> Optional[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : str = "".join(__UpperCamelCase ).strip()
return out_string
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[int]:
UpperCAmelCase__ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
UpperCAmelCase__ : List[str] = input_ids[-self.model_max_length :]
return input_ids
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
UpperCAmelCase__ : Any = 0
if os.path.isdir(__UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Any = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase__ : Tuple = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase__ : List[str] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase__ : str = token_index
writer.write(",".join(__UpperCamelCase ) + "\n" )
index += 1
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __UpperCamelCase )
return vocab_file, emoji_file
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Dict = vocab # same as swe
UpperCAmelCase__ : Optional[int] = ids_to_tokens # same as bpe
UpperCAmelCase__ : str = emoji
UpperCAmelCase__ : List[Any] = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] )
UpperCAmelCase__ : Tuple = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase__ : Optional[int] = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase__ : List[str] = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase__ : Any = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase__ : List[str] = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase__ : int = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase__ : List[str] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase__ : Tuple = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase__ : Optional[int] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self )-> List[Any]:
return len(self.ids_to_tokens )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[int] = self.content_repattera.sub("<URL>" , __UpperCamelCase )
UpperCAmelCase__ : Any = self.content_repattera.sub("<EMAIL>" , __UpperCamelCase )
UpperCAmelCase__ : str = self.content_repattera.sub("<TEL>" , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.content_repattera.sub("<DATE>" , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.content_repattera.sub("<DATE>" , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.content_repattera.sub("<PRICE>" , __UpperCamelCase )
UpperCAmelCase__ : List[str] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase__ : Union[str, Any] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> List[Any]:
UpperCAmelCase__ : Optional[int] = text.replace(" " , "<SP>" )
UpperCAmelCase__ : Any = text.replace(" " , "<SP>" )
UpperCAmelCase__ : Union[str, Any] = text.replace("\r\n" , "<BR>" )
UpperCAmelCase__ : List[str] = text.replace("\n" , "<BR>" )
UpperCAmelCase__ : Any = text.replace("\r" , "<BR>" )
UpperCAmelCase__ : int = text.replace("\t" , "<TAB>" )
UpperCAmelCase__ : str = text.replace("—" , "ー" )
UpperCAmelCase__ : Optional[Any] = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase__ : Any = text.replace(__UpperCamelCase , __UpperCamelCase )
if clean:
UpperCAmelCase__ : Any = self.clean_text(__UpperCamelCase )
def check_simbol(__UpperCamelCase ):
UpperCAmelCase__ : Dict = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2:
UpperCAmelCase__ : Optional[Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2a1 and c <= 0Xc2bf)
or (c >= 0Xc780 and c <= 0Xc783)
or (c >= 0Xcab9 and c <= 0Xcbbf)
or (c >= 0Xcc80 and c <= 0Xcda2)
):
return True
return False
def checkuae(__UpperCamelCase ):
UpperCAmelCase__ : Any = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3:
UpperCAmelCase__ : Optional[int] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_8080 and c <= 0Xe2_b07f:
return True
return False
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : str = []
while pos < len(__UpperCamelCase ):
UpperCAmelCase__ : Any = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase__ : Any = [] # (token_id, token, pos)
for e in range(__UpperCamelCase , __UpperCamelCase , -1 ):
UpperCAmelCase__ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCamelCase ) > 2:
UpperCAmelCase__ : Optional[int] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCamelCase ) > 0:
# the smallest token_id is adopted
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[0] )[0]
result.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = e
else:
UpperCAmelCase__ : Union[str, Any] = pos + 1
UpperCAmelCase__ : Optional[int] = text[pos:end]
if check_simbol(__UpperCamelCase ):
result.append("<KIGOU>" )
elif checkuae(__UpperCamelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase__ : List[Any] = end
return result
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase="\n" )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : str = []
        for index in __UpperCamelCase :
            UpperCAmelCase__ : List[Any] = self.ids_to_tokens[index][0]
            if word[:6] == "<|byte" and word[-2:] == "|>":
                byte_tokens.append(int(word[6:-2] ) )
            else:
                if len(__UpperCamelCase ) > 0:
                    words.append(bytearray(__UpperCamelCase ).decode("utf-8" , errors="replace" ) )
                    UpperCAmelCase__ : Dict = []
                if word[:7] == "<|emoji" and word[-2:] == "|>":
                    words.append(self.emoji["emoji_inv"][word] )
                elif word == "<SP>":
                    words.append(" " )
                elif word == "<BR>":
                    words.append(__UpperCamelCase )
                elif word == "<TAB>":
                    words.append("\t" )
                elif word == "<BLOCK>":
                    words.append("▀" )
                elif word == "<KIGOU>":
                    words.append("ǀ" )
                elif word == "<U2000U2BFF>":
                    words.append("‖" )
                else:
                    words.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase__ : str = "".join(__UpperCamelCase )
return text
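# --- Illustrative sketch (not part of the original file): the byte-fallback
# scheme used by the tokenizer above. Out-of-vocabulary text is emitted as one
# "<|byte%d|>" token per UTF-8 byte, and decoding rebuilds the raw bytes, so
# the round trip is lossless. The helper name below is hypothetical.
def _byte_fallback_roundtrip(word: str) -> str:
    tokens = ["<|byte%d|>" % b for b in word.encode("utf-8")]  # mirrors the encoder fallback
    raw = bytearray(int(t[6:-2]) for t in tokens)  # mirrors the "<|byte" branch of decode
    return raw.decode("utf-8", errors="replace")
# Example: _byte_fallback_roundtrip("日本語") returns "日本語" unchanged.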
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
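# --- Illustrative sketch (not part of the original file): the epsilon -> x_0
# inversion used by `step` above for prediction_type="epsilon", written out as
# plain arithmetic for one timestep:
#     x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t)
# The function name is hypothetical.
def _pred_x0_from_eps(sample, eps, alpha_prod_t):
    beta_prod_t = 1 - alpha_prod_t
    return (sample - beta_prod_t**0.5 * eps) / alpha_prod_t**0.5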
| 660 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
A__ : int = logging.get_logger(__name__)
A__ : Dict[Optional[str], Type[Formatter]] = {}
A__ : Dict[Optional[str], str] = {}
A__ : Dict[Optional[str], Exception] = {}
def a__ ( lowerCAmelCase : type , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[List[str]] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
UpperCAmelCase__ : Union[str, Any] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
UpperCAmelCase__ : Union[str, Any] = format_type
def a__ ( lowerCAmelCase : Exception , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[List[str]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
UpperCAmelCase__ : Any = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
A__ : Union[str, Any] = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
A__ : Union[str, Any] = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
A__ : Any = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def a__ ( lowerCAmelCase : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def a__ ( lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = get_format_type_from_alias(lowerCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowerCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
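# --- Illustrative sketch (not part of the original file): the sequence-length
# arithmetic the tester above relies on. With image_size=30, patch_size=2 and
# mask_ratio=0.6 the encoder sees ceil(0.4 * (225 + 1)) = 91 visible tokens
# (unmasked patches plus the [CLS] token).
def _demo_visible_tokens(image_size=30, patch_size=2, mask_ratio=0.6):
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))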
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
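# --- Illustrative sketch (not part of the original file): the nesting rules
# `make_batched` applies, restated without the PIL/tensor type checks. A single
# frame becomes [[frame]], one video (a list of frames) becomes [video], and a
# list of videos passes through unchanged.
def _demo_make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos  # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]  # a single video
    return [[videos]]  # a single frame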
| 660 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A__ : int = logging.get_logger(__name__)
A__ : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
A__ : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {}
with open(lowerCAmelCase , "r" ) as file:
for line_number, line in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : str = line.strip()
if line:
UpperCAmelCase__ : List[str] = line.split()
UpperCAmelCase__ : str = line_number
UpperCAmelCase__ : List[str] = words[0]
UpperCAmelCase__ : Tuple = value
return result
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : Dict = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
UpperCAmelCase__ : int = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ : Tuple = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ : Tuple = getattr(lowerCAmelCase , lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ : str = hf_pointer
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase__ : Tuple = value[0]
else:
UpperCAmelCase__ : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ : Tuple = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : str = value
else:
UpperCAmelCase__ : str = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ : List[Any] = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ : int = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ : str = ".".join([key, hf_param_name] )
else:
UpperCAmelCase__ : Tuple = key
UpperCAmelCase__ : Dict = value if "lm_head" in full_key else value[0]
A__ : Any = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Any=None , lowerCAmelCase : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : List[str] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ : List[Any] = True
if "*" in mapped_key:
UpperCAmelCase__ : Dict = name.split(lowerCAmelCase )[0].split("." )[-2]
UpperCAmelCase__ : Dict = mapped_key.replace("*" , lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase__ : Optional[int] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ : Dict = "weight_v"
elif "bias" in name:
UpperCAmelCase__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ : Optional[int] = "weight"
else:
UpperCAmelCase__ : str = None
if hf_dict is not None:
rename_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            return is_used
    return is_used
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Union[str, Any] = fairseq_model.state_dict()
UpperCAmelCase__ : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : Any = True
else:
UpperCAmelCase__ : str = load_wavaveca_layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : Any = name.split("." )
UpperCAmelCase__ : Optional[int] = int(items[0] )
UpperCAmelCase__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCAmelCase__ : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCAmelCase__ : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
UpperCAmelCase__ : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
UpperCAmelCase__ : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Any=False ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Optional[int] = WavaVecaConfig.from_pretrained(lowerCAmelCase )
else:
UpperCAmelCase__ : Any = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase__ : str = read_txt_into_dict(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = idalabel
UpperCAmelCase__ : List[str] = WavaVecaForSequenceClassification(lowerCAmelCase )
UpperCAmelCase__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
feature_extractor.save_pretrained(lowerCAmelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase__ : Optional[Any] = Dictionary.load(lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ : Union[str, Any] = target_dict.pad_index
UpperCAmelCase__ : List[str] = target_dict.bos_index
UpperCAmelCase__ : List[str] = target_dict.eos_index
UpperCAmelCase__ : List[str] = len(target_dict.symbols )
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase , "vocab.json" )
if not os.path.isdir(lowerCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase__ : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Union[str, Any] = 1
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = WavaVecaCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase , )
UpperCAmelCase__ : List[str] = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
UpperCAmelCase__ : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = WavaVecaForCTC(lowerCAmelCase )
else:
UpperCAmelCase__ : Any = WavaVecaForPreTraining(lowerCAmelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ : Optional[int] = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ : Dict = fairseq.tasks.setup_task(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = model[0].eval()
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase )
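# --- Illustrative sketch (not part of the original script): the index swap
# applied to the fairseq dictionary above. fairseq places <s> at index 0 and
# <pad> at index 1, while the CTC tokenizer expects the blank (<pad>) at 0, so
# the two entries trade places before vocab.json is written. The dict below is
# a hypothetical example.
_demo_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_demo_vocab["<pad>"], _demo_vocab["<s>"] = 0, 1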
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
A__ : Union[str, Any] = parser.parse_args()
A__ : int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(lowerCAmelCase , float ):
        raise TypeError("Input value must be an 'int' type" )
return bin(lowerCAmelCase ).count("1" )
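# --- Illustrative alternative (not part of the original file): Brian
# Kernighan's trick clears the lowest set bit on each iteration, so the loop
# runs once per set bit instead of once per binary digit.
def _popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clear the lowest set bit
        count += 1
    return count
# _popcount_kernighan(25) == 3, matching bin(25).count("1")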
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
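# The two methods above apply the standard vision transforms in a fixed order:
# resize -> center crop -> rescale (with an optional offset into [-1, 1]) ->
# normalize, per frame and per video. A minimal numpy sketch of that order on a
# dummy video; the helper name and the mean/std values are illustrative
# assumptions, not the processor's internals.
import numpy as np
def sketch_preprocess_frame(frame):
    scaled = frame.astype(np.float32) / 255.0  # do_rescale
    shifted = scaled * 2.0 - 1.0               # offset=True maps [0, 1] to [-1, 1]
    mean, std = 0.5, 0.5                       # assumed stats for do_normalize
    return (shifted - mean) / std
dummy_video = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
processed_video = [sketch_preprocess_frame(frame) for frame in dummy_video]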
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
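# A hedged standalone sketch of what the dispatcher above selects between: the
# same callable run eagerly, traced into a graph with `tf.function`, or
# additionally XLA-compiled (mirroring the legacy `experimental_compile` flag
# used above). Assumes TensorFlow is installed; `_double` is a stand-in.
import tensorflow as tf
def _double(x):
    return x * 2
eager_fn = _double                                        # do_eager_mode=True
graph_fn = tf.function(_double)                           # graph mode, no XLA
xla_fn = tf.function(_double, experimental_compile=True)  # graph mode + XLA
print(eager_fn(tf.constant(3)), graph_fn(tf.constant(3)), xla_fn(tf.constant(3)))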
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
A__ : Optional[int] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
A__ : Dict = typing.Union[np.floataa, int, float] # noqa: UP007
def a__ ( lowerCAmelCase : Vector , lowerCAmelCase : Vector ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(lowerCAmelCase ) - np.asarray(lowerCAmelCase )) ** 2 ) )
def a__ ( lowerCAmelCase : Vector , lowerCAmelCase : Vector ):
'''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(lowerCAmelCase , lowerCAmelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def a__ ( ):
'''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
benchmark()
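# Sanity check for both implementations above: for [1, 2, 3] and [4, 5, 6] the
# squared differences are 3 * 3**2 = 27, so either path should return
# sqrt(27) ~= 5.1962.
import math
assert math.isclose(math.sqrt(27), 5.196152422706632)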
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
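# `_LazyModule` defers the real import until an attribute is first accessed. A
# minimal hand-rolled sketch of the same pattern with importlib (an
# illustration, not transformers' implementation):
import importlib
class _LazySketch:
    def __init__(self, name):
        self._name, self._module = name, None
    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # deferred import
        return getattr(self._module, attr)
lazy_json = _LazySketch("json")
print(lazy_json.dumps({"lazy": True}))  # the import of `json` happens here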
| 660 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : str = "The dog is cute and lives in the garden house"
UpperCAmelCase__ : str = jnp.array([tokenizer.encode(__UpperCamelCase )] )
UpperCAmelCase__ : Optional[Any] = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Union[str, Any] = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )["last_hidden_state"]
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __UpperCamelCase , atol=1E-3 ) )
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
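# The module above stores a fixed mean/std pair, and its two methods are exact
# inverses. A hedged round-trip sketch with plain tensors:
import torch
mean, std = torch.zeros(1, 768), torch.ones(1, 768)
embeds = torch.randn(2, 768)
scaled = (embeds - mean) * 1.0 / std  # what the scale method computes
restored = scaled * std + mean        # what the unscale method computes
assert torch.allclose(embeds, restored)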
| 660 | 1 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A__ : int = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'linear'
_A = 'cosine'
_A = 'cosine_with_restarts'
_A = 'polynomial'
_A = 'constant'
_A = 'constant_with_warmup'
_A = 'piecewise_constant'
def a__ ( lowerCAmelCase : Optimizer , lowerCAmelCase : int = -1 ):
'''simple docstring'''
return LambdaLR(lowerCAmelCase , lambda lowerCAmelCase : 1 , last_epoch=lowerCAmelCase )
def a__ ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1.0 , lowerCAmelCase ) )
return 1.0
return LambdaLR(lowerCAmelCase , lowerCAmelCase , last_epoch=lowerCAmelCase )
def a__ ( lowerCAmelCase : Optimizer , lowerCAmelCase : str , lowerCAmelCase : int = -1 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : Tuple = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = rule_str.split(":" )
UpperCAmelCase__ : Any = int(lowerCAmelCase )
UpperCAmelCase__ : Dict = float(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = value
UpperCAmelCase__ : Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
def rule_func(lowerCAmelCase : int ) -> float:
UpperCAmelCase__ : Any = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase ):
if steps < sorted_step:
                    return rules_dict[sorted_step]
return last_lr_multiple
return rule_func
UpperCAmelCase__ : int = create_rules_function(lowerCAmelCase , lowerCAmelCase )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , last_epoch=lowerCAmelCase )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=-1 ):
'''simple docstring'''
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : float = 0.5 , lowerCAmelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
UpperCAmelCase__ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1 , lowerCAmelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(lowerCAmelCase : Tuple ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
UpperCAmelCase__ : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any=1E-7 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : List[Any]=-1 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase__ : Tuple = lr_init - lr_end
UpperCAmelCase__ : Optional[int] = num_training_steps - num_warmup_steps
UpperCAmelCase__ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase__ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
A__ : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a__ ( lowerCAmelCase : Union[str, SchedulerType] , lowerCAmelCase : Optimizer , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : int = -1 , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = SchedulerType(lowerCAmelCase )
UpperCAmelCase__ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase , last_epoch=lowerCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase , step_rules=lowerCAmelCase , last_epoch=lowerCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase , num_warmup_steps=lowerCAmelCase , last_epoch=lowerCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , num_cycles=lowerCAmelCase , last_epoch=lowerCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , power=lowerCAmelCase , last_epoch=lowerCAmelCase , )
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , last_epoch=lowerCAmelCase )
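# This file mirrors the scheduler factory in diffusers' optimization module
# (the functions above appear under placeholder names). A hedged usage sketch
# via the upstream entry point, assuming diffusers and torch are installed:
import torch
from diffusers.optimization import get_scheduler
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    "linear", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100
)
for _ in range(100):
    optimizer.step()
    lr_scheduler.step()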
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
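# Hedged example invocation of the conversion script above; the script filename
# and all paths below are placeholders, not real checkpoints:
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path ./gpt2-pytorch \
#       --gpt2_config_file ./gpt2_config.json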
| 660 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
A__ : List[Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
'''simple docstring'''
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=lowerCAmelCase )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
'''simple docstring'''
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why does a cache dir per test function not work?
UpperCAmelCase__ : Union[str, Any] = tmp_path_factory.getbasetemp() / "cache"
UpperCAmelCase__ : Optional[Any] = test_hf_cache_home / "datasets"
UpperCAmelCase__ : List[Any] = test_hf_cache_home / "metrics"
UpperCAmelCase__ : List[Any] = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(lowerCAmelCase ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(lowerCAmelCase ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(lowerCAmelCase ) )
UpperCAmelCase__ : str = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(lowerCAmelCase ) )
UpperCAmelCase__ : str = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowerCAmelCase ) )
@pytest.fixture(autouse=lowerCAmelCase , scope="session" )
def a__ ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase )
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , lowerCAmelCase )
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
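# The whole conversion above is a single key rename: DialoGPT checkpoints store
# the LM head under `lm_head.decoder.weight`, while transformers expects
# `lm_head.weight` (the two constants at the top of the script). Toy sketch:
import torch
state = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
assert list(state) == ["lm_head.weight"]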
| 660 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = (UnCLIPScheduler,)
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> int:
UpperCAmelCase__ : Any = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__UpperCamelCase )
return config
def lowerCAmelCase__ ( self )-> int:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__UpperCamelCase , prev_timestep=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config(variance_type="fixed_small_log" )
UpperCAmelCase__ : Tuple = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config(variance_type="learned_range" )
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__UpperCamelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=__UpperCamelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=__UpperCamelCase ) - -0.001_0011 < 1E-5
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Dict = self.scheduler_classes[0]
UpperCAmelCase__ : Any = self.get_scheduler_config()
UpperCAmelCase__ : Any = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ : List[Any] = scheduler.timesteps
UpperCAmelCase__ : Tuple = self.dummy_model()
UpperCAmelCase__ : int = self.dummy_sample_deter
UpperCAmelCase__ : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
UpperCAmelCase__ : List[str] = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase__ : Dict = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
UpperCAmelCase__ : List[Any] = pred_prev_sample
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : List[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(25 )
UpperCAmelCase__ : List[str] = scheduler.timesteps
UpperCAmelCase__ : List[str] = self.dummy_model()
UpperCAmelCase__ : List[str] = self.dummy_sample_deter
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : Optional[Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase__ : Any = scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , prev_timestep=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
UpperCAmelCase__ : Tuple = pred_prev_sample
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
pass
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , lowerCAmelCase , i ):  # step by the prime i, not by max_number
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = get_failure_array(lowerCAmelCase )
# 2) Step through text searching for pattern
UpperCAmelCase__ , UpperCAmelCase__ : Any = 0, 0 # index into text, pattern
while i < len(lowerCAmelCase ):
if pattern[j] == text[i]:
if j == (len(lowerCAmelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCAmelCase__ : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = [0]
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : List[Any] = 1
while j < len(lowerCAmelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCAmelCase__ : Union[str, Any] = failure[i - 1]
continue
j += 1
failure.append(lowerCAmelCase )
return failure
if __name__ == "__main__":
# Test 1)
A__ : Tuple = """abc1abc12"""
A__ : Tuple = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
A__ : List[Any] = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
A__ : str = """ABABX"""
A__ : List[str] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
A__ : Optional[int] = """AAAB"""
A__ : Optional[int] = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
A__ : Union[str, Any] = """abcdabcy"""
A__ : Tuple = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
A__ : Tuple = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
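# Hedged trace of how the failure array drives the matcher above: on a
# mismatch, j falls back to failure[j - 1] instead of restarting, so the text
# index i never moves backwards.
kmp_pattern, kmp_text = "ABAB", "ABACABAB"
kmp_fail = [0, 0, 1, 2]  # the failure array computed for "ABAB"
i = j = 0
while i < len(kmp_text):
    if kmp_text[i] == kmp_pattern[j]:
        i, j = i + 1, j + 1
        if j == len(kmp_pattern):
            print("match ending at index", i)  # -> match ending at index 8
            break
    elif j > 0:
        j = kmp_fail[j - 1]
    else:
        i += 1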
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
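# The padding helper at the top of this file pads ragged per-example labels to
# one sequence length; a 2-tuple pad value adds a trailing dimension of 2 (used
# for the entity spans). Hedged 1-D sketch of the right-padding branch:
import numpy as np
raw_labels = [[1, 2], [3]]
seq_len, pad_id = 4, -100
padded = np.full((len(raw_labels), seq_len), pad_id)
for row, lab in zip(padded, raw_labels):
    row[: len(lab)] = lab  # padding_side == "right"
print(padded.tolist())     # [[1, 2, -100, -100], [3, -100, -100, -100]]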
| 660 | 1 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
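# Hedged sketch of a Features spec the generators above can consume: a string
# column plus a sequence column whose shape comes from `seq_shapes`. Column
# names are illustrative; the helpers above appear under placeholder names in
# this dump, so the call is shown in a comment only.
import datasets
toy_features = datasets.Features(
    {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
)
toy_seq_shapes = {"vec": (4,)}
# generate_examples(toy_features, num_examples=10, seq_shapes=toy_seq_shapes)
# would yield (index, example) pairs ready for the ArrowWriter loop above.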
| 660 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Dict = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'owlvit_text_model'
def __init__( self , __UpperCamelCase=4_94_08 , __UpperCamelCase=5_12 , __UpperCamelCase=20_48 , __UpperCamelCase=12 , __UpperCamelCase=8 , __UpperCamelCase=16 , __UpperCamelCase="quick_gelu" , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=0 , __UpperCamelCase=4_94_06 , __UpperCamelCase=4_94_07 , **__UpperCamelCase , )-> Any:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Tuple = layer_norm_eps
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : str = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase__ : List[str] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'owlvit_vision_model'
def __init__( self , __UpperCamelCase=7_68 , __UpperCamelCase=30_72 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3 , __UpperCamelCase=7_68 , __UpperCamelCase=32 , __UpperCamelCase="quick_gelu" , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , **__UpperCamelCase , )-> List[Any]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : str = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Optional[Any] = patch_size
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : Any = attention_dropout
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : List[Any] = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase__ : List[str] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'owlvit'
_A = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=5_12 , __UpperCamelCase=2.6592 , __UpperCamelCase=True , **__UpperCamelCase , )-> str:
super().__init__(**__UpperCamelCase )
if text_config is None:
UpperCAmelCase__ : List[str] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase__ : Any = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase__ : Optional[int] = OwlViTTextConfig(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = OwlViTVisionConfig(**__UpperCamelCase )
UpperCAmelCase__ : Any = projection_dim
UpperCAmelCase__ : int = logit_scale_init_value
UpperCAmelCase__ : Union[str, Any] = return_dict
UpperCAmelCase__ : Dict = 1.0
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Union[str, Any] = text_config
UpperCAmelCase__ : Optional[Any] = vision_config
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Any = self.text_config.to_dict()
UpperCAmelCase__ : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase__ : int = self.__class__.model_type
return output
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-4
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = None , )-> Mapping[str, Any]:
UpperCAmelCase__ : List[Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , framework=__UpperCamelCase )
UpperCAmelCase__ : str = super().generate_dummy_inputs(
processor.image_processor , batch_size=__UpperCamelCase , framework=__UpperCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase__ ( self )-> int:
return 14
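# Hedged usage sketch for the composite config above, using the upstream class
# name (the classes here appear under placeholder names): plain dicts are
# accepted for the two sub-configs and defaults fill in the rest.
from transformers import OwlViTConfig
owlvit_config = OwlViTConfig(
    text_config={"vocab_size": 49408, "hidden_size": 512},
    vision_config={"hidden_size": 768, "patch_size": 32},
    projection_dim=512,
)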
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : int = 10
def a__ ( lowerCAmelCase : list[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : int = max(lowerCAmelCase )
while placement <= max_digit:
# declare and initialize empty buckets
UpperCAmelCase__ : list[list] = [[] for _ in range(lowerCAmelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
UpperCAmelCase__ : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(lowerCAmelCase )
# put each buckets' contents into list_of_ints
UpperCAmelCase__ : List[Any] = 0
for b in range(lowerCAmelCase ):
for i in buckets[b]:
UpperCAmelCase__ : List[str] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
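# Hedged standalone sketch of the same LSD radix sort (the version above uses
# placeholder variable names internally; this is a clean restatement, with one
# bucket pass per decimal digit of the maximum value since RADIX = 10):
def radix_sort_sketch(values, radix=10):
    placement = 1
    while placement <= max(values):
        buckets = [[] for _ in range(radix)]
        for value in values:
            buckets[(value // placement) % radix].append(value)
        values = [value for bucket in buckets for value in bucket]
        placement *= radix
    return values
print(radix_sort_sketch([170, 45, 75, 90, 802, 24, 2, 66]))
# -> [2, 24, 45, 66, 75, 90, 170, 802]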
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
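# The two tables above answer "which teacher layers seed / supervise an n-layer
# student": e.g. a 12-layer teacher distilled to 3 layers copies layers
# [0, 6, 11] (first / middle / last) and supervises [3, 7, 11]. Hedged sketch
# of the lookup-with-fallback performed by the first helper above (upstream
# name pick_layers_to_copy; it appears under a placeholder here):
def pick_layers_sketch(n_student, n_teacher, table):
    try:
        return table[n_teacher][n_student]
    except KeyError:
        # fall back to the first n_student layers, as the helper above warns
        return list(range(n_student))
assert pick_layers_sketch(3, 12, {12: {3: [0, 6, 11]}}) == [0, 6, 11]
assert pick_layers_sketch(5, 12, {12: {3: [0, 6, 11]}}) == [0, 1, 2, 3, 4]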
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
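# Hedged, self-contained sketch of the selection idea behind LAYERS_TO_COPY:
# choose n_student evenly spaced teacher layers while always keeping the
# first and the last. It reproduces several table entries (12 -> 3 gives
# [0, 6, 11]; 16 -> 4 gives [0, 5, 10, 15]) but is an approximation, not the
# exact hand-tuned table above.
def pick_evenly_spaced_layers(n_teacher: int, n_student: int) -> List[int]:
    if n_student <= 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return sorted({round(i * step) for i in range(n_student)})
if __name__ == "__main__":
    print(pick_evenly_spaced_layers(12, 3))  # [0, 6, 11]
    print(pick_evenly_spaced_layers(16, 4))  # [0, 5, 10, 15]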
| 660 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : int = """Hello, World!"""
A__ : Any = """en_XX"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("data_bin" )
UpperCAmelCase__ : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(lowerCAmelCase ).parent ) , checkpoint_file=Path(lowerCAmelCase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(lowerCAmelCase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(lowerCAmelCase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(lowerCAmelCase )
UpperCAmelCase__ : Dict = xmod.model.encoder.sentence_encoder
UpperCAmelCase__ : int = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase__ : str = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = XmodForSequenceClassification(lowerCAmelCase ) if classification_head else XmodForMaskedLM(lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase__ : List[Any] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase__ : Optional[int] = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase__ : Any = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase__ : Tuple = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase__ : int = model.roberta.encoder.layer[i]
UpperCAmelCase__ : Tuple = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase__ : Union[str, Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
UpperCAmelCase__ : Any = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase__ : Tuple = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase__ : List[Any] = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase__ : Any = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase__ : List[Any] = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase__ : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase__ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
UpperCAmelCase__ : List[Any] = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase__ : Any = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase__ : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase__ : Optional[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase__ : List[str] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
UpperCAmelCase__ : Optional[int] = xmod_layer.fca.weight
UpperCAmelCase__ : str = xmod_layer.fca.bias
# output
UpperCAmelCase__ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
UpperCAmelCase__ : str = xmod_layer.fca.weight
UpperCAmelCase__ : List[Any] = xmod_layer.fca.bias
UpperCAmelCase__ : Any = xmod_layer.final_layer_norm.weight
UpperCAmelCase__ : Tuple = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase__ : List[str] = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase__ : Any = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase__ : Any = bert_output.adapter_modules[lang_code]
UpperCAmelCase__ : Dict = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase__ : Optional[Any] = from_adapter.fca.weight
UpperCAmelCase__ : Dict = from_adapter.fca.bias
UpperCAmelCase__ : Optional[int] = from_adapter.fca.weight
UpperCAmelCase__ : str = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase__ : str = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase__ : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase__ : Dict = xmod.model.classification_heads["mnli"].dense.weight
UpperCAmelCase__ : Optional[int] = xmod.model.classification_heads["mnli"].dense.bias
UpperCAmelCase__ : Optional[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase__ : Optional[int] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase__ : List[Any] = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase__ : Dict = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase__ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase__ : Dict = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase__ : Dict = xmod.model.encoder.lm_head.weight
UpperCAmelCase__ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase__ : Dict = xmod.encode(lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowerCAmelCase )
UpperCAmelCase__ : Dict = model(lowerCAmelCase )[0]
if classification_head:
UpperCAmelCase__ : Any = xmod.model.classification_heads["mnli"](xmod.extract_features(lowerCAmelCase ) )
else:
UpperCAmelCase__ : Dict = xmod.model(lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase__ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
UpperCAmelCase__ : Optional[int] = torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(lowerCAmelCase ).mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ : int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
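# Hedged, self-contained sketch of the numerical-equivalence check performed
# at the end of the conversion above: compare the converted model's output
# with the original's elementwise and fail loudly past a tolerance. The
# tensors in the demo are placeholders for the two models' logits.
def check_outputs_close(ours, theirs, atol=1E-3):
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(F"max_absolute_diff = {max_absolute_diff}")
    if not torch.allclose(ours, theirs, atol=atol):
        raise ValueError(F"outputs differ by up to {max_absolute_diff} (atol={atol})")
if __name__ == "__main__":
    reference = torch.randn(1, 4, 8)
    check_outputs_close(reference, reference + 1E-5)  # passes: well within atol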
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
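# Hedged sketch condensing the inference pattern the tests above exercise:
# ONNX Stable Diffusion inpainting takes a prompt, an init image, and a
# mask. Checkpoint, revision, and prompt mirror the tests;
# "CPUExecutionProvider" and the image arguments are placeholders.
def run_inpaint_sketch(init_image, mask_image):
    pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting", revision="onnx", provider="CPUExecutionProvider")
    output = pipe(
        prompt="A red cat sitting on a park bench", image=init_image, mask_image=mask_image,
        guidance_scale=7.5, num_inference_steps=10, generator=np.random.RandomState(0), output_type="np")
    return output.images[0]  # (512, 512, 3) array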
| 660 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = StableDiffusionDiffEditPipeline
_A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
_A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
_A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_A = frozenset([] )
def lowerCAmelCase__ ( self )-> Any:
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , )
UpperCAmelCase__ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCamelCase , set_alpha_to_zero=__UpperCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
UpperCAmelCase__ : Optional[int] = CLIPTextModel(__UpperCamelCase )
UpperCAmelCase__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase__ : str = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> List[Any]:
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith("mps" ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(__UpperCamelCase )
else:
UpperCAmelCase__ : str = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> str:
UpperCAmelCase__ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" )
if str(__UpperCamelCase ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(__UpperCamelCase )
else:
UpperCAmelCase__ : Optional[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCAmelCase__ : List[str] = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> str:
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Tuple = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" )
if str(__UpperCamelCase ).startswith("mps" ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(__UpperCamelCase )
else:
UpperCAmelCase__ : Optional[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self )-> Union[str, Any]:
if not hasattr(self.pipeline_class , "_optional_components" ):
return
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase )
UpperCAmelCase__ : Tuple = pipe(**__UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = self.pipeline_class.from_pretrained(__UpperCamelCase )
pipe_loaded.to(__UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCamelCase , __UpperCamelCase ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = pipe_loaded(**__UpperCamelCase )[0]
UpperCAmelCase__ : str = np.abs(output - output_loaded ).max()
self.assertLess(__UpperCamelCase , 1E-4 )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[Any] = "cpu"
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_mask_inputs(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = pipe.generate_mask(**__UpperCamelCase )
UpperCAmelCase__ : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase__ : Any = np.array([0] * 9 )
UpperCAmelCase__ : List[Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[str] = "cpu"
UpperCAmelCase__ : str = self.get_dummy_components()
UpperCAmelCase__ : Dict = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = self.get_dummy_inversion_inputs(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = pipe.invert(**__UpperCamelCase ).images
UpperCAmelCase__ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase__ : str = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase__ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
def lowerCAmelCase__ ( self )-> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Union[str, Any] = "cpu"
UpperCAmelCase__ : Dict = self.get_dummy_components()
UpperCAmelCase__ : List[str] = {"beta_start": 0.0_0085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
UpperCAmelCase__ : Any = DPMSolverMultistepScheduler(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = DPMSolverMultistepInverseScheduler(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inversion_inputs(__UpperCamelCase )
UpperCAmelCase__ : List[str] = pipe.invert(**__UpperCamelCase ).images
UpperCAmelCase__ : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase__ : Any = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase__ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCAmelCase__ ( cls )-> Any:
UpperCAmelCase__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
UpperCAmelCase__ : Tuple = raw_image.convert("RGB" ).resize((7_68, 7_68) )
UpperCAmelCase__ : int = raw_image
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Any = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = "a bowl of fruit"
UpperCAmelCase__ : Optional[Any] = "a bowl of pears"
UpperCAmelCase__ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCamelCase , target_prompt=__UpperCamelCase , generator=__UpperCamelCase , )
UpperCAmelCase__ : Union[str, Any] = pipe.invert(
prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase ).latents
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
UpperCAmelCase__ : Tuple = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Dict = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
UpperCAmelCase__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ : Optional[Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Tuple = "a bowl of fruit"
UpperCAmelCase__ : List[Any] = "a bowl of pears"
UpperCAmelCase__ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCamelCase , target_prompt=__UpperCamelCase , generator=__UpperCamelCase , )
UpperCAmelCase__ : Any = pipe.invert(
prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase , num_inference_steps=25 , ).latents
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
UpperCAmelCase__ : Optional[Any] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
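# Hedged sketch of the three-step DiffEdit flow the slow tests above
# exercise: (1) derive an edit mask from source/target prompts, (2) invert
# the input image to latents, (3) inpaint the masked region toward the
# target prompt. The pipeline and image are supplied by the caller.
def diffedit_sketch(pipe, image, source_prompt, target_prompt):
    mask = pipe.generate_mask(image=image, source_prompt=source_prompt, target_prompt=target_prompt)
    latents = pipe.invert(prompt=source_prompt, image=image).latents
    return pipe(prompt=target_prompt, mask_image=mask, image_latents=latents, output_type="np").images[0]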
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
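# Hedged, self-contained sketch of the attribute-map mechanism used above
# (the dict aliasing "hidden_size" to "d_model"): PretrainedConfig resolves
# aliased attribute names, so generic code can read `hidden_size` even
# though the config stores `d_model`. The class below is illustrative.
class TinyDetrLikeConfig(PretrainedConfig):
    attribute_map = {"hidden_size": "d_model"}
    def __init__(self, d_model=256, **kwargs):
        self.d_model = d_model
        super().__init__(**kwargs)
# Usage: TinyDetrLikeConfig(d_model=128).hidden_size -> 128, via the alias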
| 660 | 1 |
"""simple docstring"""
from math import loga
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    elif lowerCAmelCase < 0:
        raise ValueError("Input value must be a non-negative integer" )
    return 0 if (lowerCAmelCase == 0) else int(loga(lowerCAmelCase & -lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
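# Hedged, self-contained illustration of the bit trick above: in two's
# complement, `n & -n` isolates the lowest set bit, and the base-2 log of
# that power of two is the bit's index. The demo uses int.bit_length() so it
# does not depend on the (renamed) math import above.
if __name__ == "__main__":
    for n in (1, 4, 6, 36):
        lowest = n & -n
        print(n, bin(n), lowest, lowest.bit_length() - 1)
        # e.g. 36 = 0b100100 -> lowest set bit 4 -> index 2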
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
    # wait until every replica has written its rank_*.json file
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
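    # Hedged example launch (script name, model, and paths are placeholders);
    # --local_rank is injected by the launcher, per the argparse help above:
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro \
    #       --save_dir tmp_gen --bs 16 --fp16 --src_lang en --tgt_lang ro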
run_generate()
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A__ : str = logging.get_logger("""transformers.models.speecht5""")
A__ : List[str] = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
A__ : Union[str, Any] = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
A__ : str = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
A__ : Optional[int] = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
A__ : Tuple = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
A__ : int = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
A__ : int = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
A__ : List[Any] = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
A__ : List[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
A__ : Tuple = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A__ : int = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A__ : List[Any] = []
A__ : int = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
A__ : Any = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
A__ : Tuple = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
A__ : Union[str, Any] = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : Union[str, Any] = getattr(lowerCAmelCase , lowerCAmelCase )
if weight_type is not None:
UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase , lowerCAmelCase ).shape
else:
UpperCAmelCase__ : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Optional[int] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : str = value
elif weight_type == "bias":
UpperCAmelCase__ : str = value
elif weight_type == "running_mean":
UpperCAmelCase__ : Tuple = value
elif weight_type == "running_var":
UpperCAmelCase__ : Dict = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : int = value
else:
UpperCAmelCase__ : Any = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
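# Hedged mini-demo of the wildcard rules implemented just above: an ignore
# key ending in ".*" is a prefix match, while a key containing ".*." splits
# into a prefix and a suffix that must both occur in the parameter name.
def matches_ignore_key(name, key):
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name
# matches_ignore_key("encoder.layers.3.norm_k.weight", "encoder.layers.*.norm_k.weight") -> True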
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
if task == "s2t":
UpperCAmelCase__ : int = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Union[str, Any] = MAPPING_S2T
UpperCAmelCase__ : str = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[Any] = MAPPING_T2S
UpperCAmelCase__ : Any = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : str = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : int = MAPPING_S2S
UpperCAmelCase__ : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(F"Unsupported task: {task}" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase , lowerCAmelCase ):
logger.info(F"{name} was ignored" )
continue
UpperCAmelCase__ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : Any = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = key.split(".*." )
if prefix in name and suffix in name:
UpperCAmelCase__ : List[str] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : str = True
if "*" in mapped_key:
UpperCAmelCase__ : List[Any] = name.split(lowerCAmelCase )[0].split("." )[-2]
UpperCAmelCase__ : Union[str, Any] = mapped_key.replace("*" , lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase__ : Optional[int] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ : str = "weight_v"
elif "bias" in name:
UpperCAmelCase__ : Union[str, Any] = "bias"
elif "weight" in name:
UpperCAmelCase__ : List[Any] = "weight"
elif "running_mean" in name:
UpperCAmelCase__ : int = "running_mean"
elif "running_var" in name:
UpperCAmelCase__ : str = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[Any] = "num_batches_tracked"
else:
UpperCAmelCase__ : Dict = None
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : List[Any] = name.split("." )
UpperCAmelCase__ : List[str] = int(items[0] )
UpperCAmelCase__ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCAmelCase__ : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCAmelCase__ : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
UpperCAmelCase__ : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase )
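# In the fairseq "conv_layers.<layer_id>.<type_id>.*" names handled above,
# type_id 0 is the convolution itself and type_id 2 is its (group/layer)
# norm; any other type_id has no HF counterpart and lands in unused_weights.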
@torch.no_grad()
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Any = SpeechTaConfig.from_pretrained(lowerCAmelCase )
else:
UpperCAmelCase__ : Any = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : Optional[int] = config.max_text_positions
UpperCAmelCase__ : str = SpeechTaForSpeechToText(lowerCAmelCase )
elif task == "t2s":
UpperCAmelCase__ : Dict = 1876
UpperCAmelCase__ : int = 600
UpperCAmelCase__ : Tuple = config.max_speech_positions
UpperCAmelCase__ : str = SpeechTaForTextToSpeech(lowerCAmelCase )
elif task == "s2s":
UpperCAmelCase__ : List[str] = 1876
UpperCAmelCase__ : int = config.max_speech_positions
UpperCAmelCase__ : Union[str, Any] = SpeechTaForSpeechToSpeech(lowerCAmelCase )
else:
raise ValueError(F"Unknown task name: {task}" )
if vocab_path:
UpperCAmelCase__ : List[Any] = SpeechTaTokenizer(lowerCAmelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : List[Any] = AddedToken("<mask>" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
UpperCAmelCase__ : Tuple = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
UpperCAmelCase__ : Optional[int] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : Union[str, Any] = SpeechTaProcessor(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
UpperCAmelCase__ : int = torch.load(lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint["model"] , lowerCAmelCase , lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(lowerCAmelCase )
model.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
A__ : Any = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
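# A hypothetical invocation of the script above (all paths and the repo id
# are placeholders, not real checkpoints):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf \
#       --push_to_hub my-user/speecht5_tts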
| 660 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit.

    >>> get_set_bits_count_using_brian_kernighans_algorithm(37)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears exactly the lowest set bit
        result += 1
    return result
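# Worked example of the trick above: 44 is 0b101100 and 43 is 0b101011, so
# 44 & 43 == 0b101000 -- exactly the lowest set bit is cleared. The loop
# therefore runs once per *set* bit instead of once per bit position.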
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the lowest bit and shifting right.

    >>> get_set_bits_count_using_modulo_operator(37)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    '''Benchmark the two implementations against each other.'''

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(F"Benchmark when {number = }:")
        print(F"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(F"timeit() runs in {timing} seconds")
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(F"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
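    # extra illustrative check: both implementations must agree everywhere
    assert all(
        get_set_bits_count_using_brian_kernighans_algorithm(n)
        == get_set_bits_count_using_modulo_operator(n)
        for n in range(256)
    )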
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int = 10**9 ):
'''simple docstring'''
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : List[str] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase__ : Any = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 660 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def a__ ( ):
'''simple docstring'''
from torch.utils.cpp_extension import load
UpperCAmelCase__ : Tuple = Path(lowerCAmelCase ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
UpperCAmelCase__ : List[str] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , lowerCAmelCase , with_cuda=lowerCAmelCase , extra_include_paths=[str(lowerCAmelCase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 660 |
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers representing points in the same space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point: list[float]) -> None:
    '''Raise TypeError/ValueError if ``point`` is not a non-empty list of numbers.'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = F"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Like manhattan_distance, but the sum is written as a single expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
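    # illustrative calls with the function names as defined above
    print(manhattan_distance([1, 1], [2, 2]))  # 2.0
    print(manhattan_distance_one_liner([1, 1], [2, 2]))  # 2.0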
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = set()
# edges = list of graph's edges
UpperCAmelCase__ : Tuple = get_edges(lowerCAmelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
UpperCAmelCase__ , UpperCAmelCase__ : int = edges.pop()
chosen_vertices.add(lowerCAmelCase )
chosen_vertices.add(lowerCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCAmelCase )
return chosen_vertices
def a__ ( lowerCAmelCase : dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 660 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    '''Check primality with trial division over candidates of the form 6k +/- 1.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
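# Why 6k +/- 1 suffices: write any integer as 6k + r with r in 0..5; for
# r in (0, 2, 3, 4) it is divisible by 2 or 3, so a prime above 3 must have
# r == 1 or r == 5, which is exactly the stride-6 pattern checked above.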
def solution(nth: int = 10_001) -> int:
    '''Return the nth prime number (Project Euler problem 7).'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : Optional[Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a__ ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : list[int] , lowerCAmelCase : list[int] , lowerCAmelCase : int , lowerCAmelCase : list[list[int]] , ):
'''simple docstring'''
UpperCAmelCase__ : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase ) )
] # the reference grid
UpperCAmelCase__ : Tuple = 1
UpperCAmelCase__ : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowerCAmelCase ) )
] # the action grid
UpperCAmelCase__ : Optional[int] = init[0]
UpperCAmelCase__ : Optional[int] = init[1]
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : int = g + heuristic[x][y] # cost from starting cell to destination cell
UpperCAmelCase__ : Any = [[f, g, x, y]]
UpperCAmelCase__ : Any = False # flag that is set when search is complete
UpperCAmelCase__ : Tuple = False # flag set if we can't find expand
while not found and not resign:
if len(lowerCAmelCase ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCAmelCase__ : List[Any] = cell.pop()
UpperCAmelCase__ : Dict = next_cell[2]
UpperCAmelCase__ : Tuple = next_cell[3]
UpperCAmelCase__ : Dict = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCAmelCase__ : Dict = True
else:
for i in range(len(lowerCAmelCase ) ): # to try out different valid actions
UpperCAmelCase__ : Optional[int] = x + DIRECTIONS[i][0]
UpperCAmelCase__ : List[str] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowerCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCAmelCase__ : List[Any] = g + cost
UpperCAmelCase__ : List[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = i
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[int] = goal[0]
UpperCAmelCase__ : str = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCAmelCase__ : int = x - DIRECTIONS[action[x][y]][0]
UpperCAmelCase__ : Optional[int] = y - DIRECTIONS[action[x][y]][1]
UpperCAmelCase__ : Dict = xa
UpperCAmelCase__ : Tuple = ya
invpath.append([x, y] )
UpperCAmelCase__ : Tuple = []
for i in range(len(lowerCAmelCase ) ):
path.append(invpath[len(lowerCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
A__ : int = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
A__ : Tuple = [0, 0]
# all coordinates are given in format [y,x]
A__ : List[Any] = [len(grid) - 1, len(grid[0]) - 1]
A__ : List[Any] = 1
# the cost map which pushes the path closer to the goal
A__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
A__ : List[Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
A__ : int = 99
A__ , A__ : Optional[int] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
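        # with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225, and the expected visible
        # sequence length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91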
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : int = {"""vocab_file""": """vocab.txt"""}
A__ : Dict = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
A__ : Tuple = {
"""openbmb/cpm-ant-10b""": 1_024,
}
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = collections.OrderedDict()
with open(lowerCAmelCase , "r" , encoding="utf-8" ) as reader:
UpperCAmelCase__ : int = reader.readlines()
for index, token in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = token.rstrip("\n" )
UpperCAmelCase__ : Any = index
return vocab
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase="<unk>" , __UpperCamelCase=2_00 )-> Optional[Any]:
UpperCAmelCase__ : Tuple = vocab
UpperCAmelCase__ : int = unk_token
UpperCAmelCase__ : str = max_input_chars_per_word
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = list(__UpperCamelCase )
if len(__UpperCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : Union[str, Any] = []
while start < len(__UpperCamelCase ):
UpperCAmelCase__ : Union[str, Any] = len(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = None
while start < end:
UpperCAmelCase__ : Optional[Any] = "".join(chars[start:end] )
if substr in self.vocab:
UpperCAmelCase__ : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__UpperCamelCase )
UpperCAmelCase__ : int = end
return sub_tokens
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
_A = False
def __init__( self , __UpperCamelCase , __UpperCamelCase="<d>" , __UpperCamelCase="</d>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<unk>" , __UpperCamelCase="</n>" , __UpperCamelCase="</_>" , __UpperCamelCase="left" , **__UpperCamelCase , )-> int:
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=__UpperCamelCase , eod_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , unk_token=__UpperCamelCase , line_token=__UpperCamelCase , space_token=__UpperCamelCase , padding_side=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase__ : List[Any] = bod_token
UpperCAmelCase__ : Any = eod_token
UpperCAmelCase__ : int = load_vocab(__UpperCamelCase )
UpperCAmelCase__ : int = self.encoder[space_token]
UpperCAmelCase__ : Dict = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCAmelCase__ : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCamelCase : x[1] ) )
UpperCAmelCase__ : int = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : str = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase__ ( self )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase__ ( self )-> int:
return len(self.encoder )
def lowerCAmelCase__ ( self )-> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
UpperCAmelCase__ : List[Any] = []
for x in jieba.cut(__UpperCamelCase , cut_all=__UpperCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCamelCase ) )
return output_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> Any:
UpperCAmelCase__ : Union[str, Any] = [i for i in token_ids if i >= 0]
UpperCAmelCase__ : Optional[int] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
return token in self.encoder
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return "".join(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
return self.decoder.get(__UpperCamelCase , self.unk_token )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if os.path.isdir(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
UpperCAmelCase__ : List[str] = (filename_prefix + "-" if filename_prefix else "") + save_directory
UpperCAmelCase__ : List[Any] = 0
if " " in self.encoder:
UpperCAmelCase__ : int = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
UpperCAmelCase__ : int = self.encoder["\n"]
del self.encoder["\n"]
UpperCAmelCase__ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCamelCase : x[1] ) )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase__ : List[str] = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase ))
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
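# Minimal usage sketch (illustrative; the obfuscated method names above
# correspond to the upstream FlaxDDPMScheduler API -- create_state,
# set_timesteps and step -- treat those names as assumptions here):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   sample = jax.random.normal(jax.random.PRNGKey(0), (1, 32, 32, 3))
#   for t in state.timesteps:
#       model_output = sample  # placeholder for a real denoising model call
#       out = scheduler.step(state, model_output, t, sample)
#       sample, state = out.prev_sample, out.state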
| 660 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
A__ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
class _lowercase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
_A = None
_A = None
class _lowercase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
_A = datasets.Audio()
_A = 'audio'
_A = AudioFolderConfig
_A = 42 # definition at the bottom of the script
_A = AudioClassification(audio_column='audio' , label_column='label' )
A__ : Union[str, Any] = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
A__ : str = AUDIO_EXTENSIONS
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    '''Normalize ``videos`` to a batch: a list of videos, each a list of frames.'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # already a batch of videos
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # a single video: wrap it into a batch of one
        return [videos]
    elif is_valid_image(videos):
        # a single frame: wrap it as a one-frame video in a batch of one
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}")
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
import math
import os
import sys
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
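# Read the file as raw bytes and concatenate one zero-padded 8-bit string per
# byte; e.g. (illustrative) the byte 0x41 contributes "01000001".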
UpperCAmelCase__ : Union[str, Any] = ""
try:
with open(lowerCAmelCase , "rb" ) as binary_file:
UpperCAmelCase__ : List[Any] = binary_file.read()
for dat in data:
UpperCAmelCase__ : Union[str, Any] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def a__ ( lowerCAmelCase : dict[str, str] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : str ):
'''simple docstring'''
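# LZW lexicon update: the matched key is retired and its two one-bit
# extensions are registered; whenever the next index reaches a power of two,
# every stored code gains a leading "0" so code widths stay uniform.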
lexicon.pop(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = last_match_id
if math.loga(lowerCAmelCase ).is_integer():
for curr_key in lexicon:
UpperCAmelCase__ : Tuple = "0" + lexicon[curr_key]
UpperCAmelCase__ : int = bin(lowerCAmelCase )[2:]
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
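# Standard LZW over a bit string: extend curr_string bit by bit, emit the code
# of each lexicon match, and let add_key_to_lexicon register new entries;
# any trailing bits are flushed after the loop.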
UpperCAmelCase__ : Optional[int] = {"0": "0", "1": "1"}
UpperCAmelCase__ , UpperCAmelCase__ : Dict = "", ""
UpperCAmelCase__ : Any = len(lowerCAmelCase )
for i in range(len(lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase__ : List[str] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
index += 1
UpperCAmelCase__ : str = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCAmelCase__ : Tuple = lexicon[curr_string]
result += last_match_id
return result
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
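# Prepend the original file size as an Elias-gamma-style header: len(bits)-1
# zeros followed by the size in binary, so the decoder knows how many bits of
# real data to expect.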
UpperCAmelCase__ : Optional[int] = os.path.getsize(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = bin(lowerCAmelCase )[2:]
UpperCAmelCase__ : int = len(lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
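# Pack the bit string into bytes; the stream is terminated with a single "1"
# bit followed by zero padding so the decoder can locate the true end of the
# data.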
UpperCAmelCase__ : Optional[Any] = 8
try:
with open(lowerCAmelCase , "wb" ) as opened_file:
UpperCAmelCase__ : Any = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(lowerCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : int = read_file_binary(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = compress_data(lowerCAmelCase )
UpperCAmelCase__ : int = add_file_length(lowerCAmelCase , lowerCAmelCase )
write_file_binary(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
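# Illustrative example: 13 == 0b1101, so the expected result is 3 set bits.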
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowerCAmelCase , float ):
raise TypeError("Input value must be an 'int' type" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
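# Decorator factory: wraps a benchmark function so it runs either eagerly or
# as a tf.function graph (optionally XLA-compiled), depending on the flags.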
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
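# Build a (batch_size, sequence_length) tensor of random token ids to use as
# dummy input for the speed/memory measurements.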
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 additional times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict ):
'''simple docstring'''
# Initialise PyTorch model
UpperCAmelCase__ : int = BertConfig.from_json_file(lowerCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
UpperCAmelCase__ : int = BertForPreTraining(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
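# Distributed evaluation: every rank generates predictions for its shard of
# the dataset and writes them to rank_<i>_output.json; rank 0 later gathers
# and aggregates these partial files.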
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
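# Merge the per-rank record lists, restore dataset order by sorting on the
# example id, and keep only the prediction strings.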
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda x : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
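# The two methods below standardize ("scale") and de-standardize ("unscale")
# CLIP image embeddings using the learned per-dimension mean and std.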
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : str = """▁"""
A__ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
A__ : Optional[Any] = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
A__ : Dict = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Any = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
UpperCAmelCase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
UpperCAmelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
UpperCAmelCase__ : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase__ : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase__ : Tuple = 1
UpperCAmelCase__ : Optional[int] = len(self.sp_model ) + self.fairseq_offset
UpperCAmelCase__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self )-> Any:
UpperCAmelCase__ : Union[str, Any] = self.__dict__.copy()
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
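# Special-token format: single sequence -> <s> X </s>; sequence pair ->
# <s> A </s></s> B </s>.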
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Optional[int] = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : List[Any] = [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self )-> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[str] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ : str = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : List[Any] = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip()
return out_string
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , "wb" ) as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Union[str, Any] = {"""vocab_file""": """vocab.txt"""}
A__ : Dict = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
A__ : str = {
"""facebook/esm2_t6_8M_UR50D""": 1_024,
"""facebook/esm2_t12_35M_UR50D""": 1_024,
}
def a__ ( lowerCAmelCase : Dict ):
'''simple docstring'''
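# The vocabulary is a plain text file with one token per line; surrounding
# whitespace is stripped from each entry.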
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase__ : Optional[Any] = f.read().splitlines()
return [l.strip() for l in lines]
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase="<unk>" , __UpperCamelCase="<cls>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase="<eos>" , **__UpperCamelCase , )-> Tuple:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : str = load_vocab_file(__UpperCamelCase )
UpperCAmelCase__ : List[str] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase__ : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase__ : Union[str, Any] = unk_token
UpperCAmelCase__ : str = cls_token
UpperCAmelCase__ : int = pad_token
UpperCAmelCase__ : Tuple = mask_token
UpperCAmelCase__ : Optional[int] = eos_token
UpperCAmelCase__ : int = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return self._id_to_token.get(__UpperCamelCase , self.unk_token )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return self._token_to_id.get(__UpperCamelCase , self._token_to_id.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> Any:
return text.split()
def lowerCAmelCase__ ( self , __UpperCamelCase=False )-> List[str]:
return len(self._id_to_token )
def lowerCAmelCase__ ( self )-> Tuple:
return {token: i for i, token in enumerate(self.all_tokens )}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return self._token_to_id.get(__UpperCamelCase , self._token_to_id.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return self._id_to_token.get(__UpperCamelCase , self.unk_token )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : List[str] = [self.cls_token_id]
UpperCAmelCase__ : int = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False )-> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase__ : Any = [1] + ([0] * len(__UpperCamelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(__UpperCamelCase ) + [1]
return mask
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Dict = os.path.join(__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(__UpperCamelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def lowerCAmelCase__ ( self )-> int:
return self.get_vocab_size(with_added_tokens=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> int:
return super()._add_tokens(__UpperCamelCase , special_tokens=__UpperCamelCase )
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
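# Raw DialoGPT checkpoints keep the LM-head weight under the old key
# ("lm_head.decoder.weight"); the conversion pops it so the state dict can be
# re-saved under the key Transformers expects ("lm_head.weight").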
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : str=None , ):
'''simple docstring'''
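# Fill in any masks the caller left out: padding-based attention masks derived
# from the pad token, plus all-ones head / decoder-head / cross-attention
# masks.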
if attention_mask is None:
UpperCAmelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCAmelCase__ : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCAmelCase__ : Optional[int] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCAmelCase )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="relu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , )-> Dict:
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = encoder_layerdrop
UpperCAmelCase__ : List[str] = decoder_layerdrop
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : List[str] = eos_token_id
UpperCAmelCase__ : Union[str, Any] = pad_token_id
UpperCAmelCase__ : int = bos_token_id
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = self.eos_token_id # Eos Token
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase__ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ : Dict = self.get_config()
UpperCAmelCase__ : Optional[int] = prepare_mam_aaa_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCAmelCase__ ( self )-> Optional[Any]:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[Any] = MaMaaaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
UpperCAmelCase__ : Dict = inputs_dict["input_ids"]
UpperCAmelCase__ : str = inputs_dict["attention_mask"]
UpperCAmelCase__ : List[str] = inputs_dict["head_mask"]
# first forward pass
UpperCAmelCase__ : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCAmelCase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )["last_hidden_state"]
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[
"last_hidden_state"
]
# select random slice
UpperCAmelCase__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-2 ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Tuple = MaMaaaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase )
UpperCAmelCase__ : str = outputs.encoder_last_hidden_state
UpperCAmelCase__ : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ : List[str] = model.get_encoder()
encoder.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Dict = MaMaaaEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Any = MaMaaaDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
_A = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
_A = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_A = True
_A = True
_A = False
_A = False
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Optional[int] = MaMaaaModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
self.assertEqual(info["missing_keys"] , [] )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[Any] = copy.deepcopy(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if not self.is_encoder_decoder:
UpperCAmelCase__ : Tuple = inputs["input_ids"]
del inputs["input_ids"]
else:
UpperCAmelCase__ : Tuple = inputs["input_ids"]
UpperCAmelCase__ : Any = inputs.get("decoder_input_ids" , __UpperCamelCase )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __UpperCamelCase )
UpperCAmelCase__ : int = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCAmelCase__ : int = wte(__UpperCamelCase )
else:
UpperCAmelCase__ : Optional[int] = wte(__UpperCamelCase )
UpperCAmelCase__ : int = wte(__UpperCamelCase )
with torch.no_grad():
model(**__UpperCamelCase )[0]
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Any = input_dict["input_ids"]
UpperCAmelCase__ : Union[str, Any] = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = MaMaaaForConditionalGeneration(__UpperCamelCase ).eval().to(__UpperCamelCase )
if torch_device == "cuda":
model.half()
model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase )
model.generate(num_beams=4 , do_sample=__UpperCamelCase , early_stopping=__UpperCamelCase , num_return_sequences=3 )
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return torch.tensor(lowerCAmelCase , dtype=torch.long , device=lowerCAmelCase )
A__ : Dict = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> Tuple:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Union[str, Any] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCAmelCase__ : Optional[int] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCAmelCase__ : Optional[Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**__UpperCamelCase )[0]
UpperCAmelCase__ : Any = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
UpperCAmelCase__ : Dict = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__UpperCamelCase )
# change to intended input
UpperCAmelCase__ : Union[str, Any] = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCAmelCase__ : List[str] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCAmelCase__ : List[Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**__UpperCamelCase )[0]
UpperCAmelCase__ : str = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
UpperCAmelCase__ : Tuple = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
UpperCAmelCase__ : Tuple = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCAmelCase__ : str = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="pt" )
UpperCAmelCase__ : Dict = model.generate(
input_ids=dct["input_ids"].to(__UpperCamelCase ) , attention_mask=dct["attention_mask"].to(__UpperCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
UpperCAmelCase__ : int = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
UpperCAmelCase__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert generated == expected_en
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , i ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
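# Editorial sketch of the counting idea below: the primes are sorted, so the
# inner loop shrinks `right` until prime[left] * prime[right] < max_number;
# every index j in [left, right] then yields a valid semiprime
# prime[left] * prime[j]. For max_number = 30 the primes up to 15 are
# [2, 3, 5, 7, 11, 13] and the sweep counts 6 + 3 + 1 = 10 semiprimes:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.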
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def a__ ( lowerCAmelCase : int = 100_0000 , lowerCAmelCase : int = 10 ):
'''simple docstring'''
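# Editorial note: a square lamina with outer side `outer_width` and a square
# hole of side `hole_width` (same parity) uses
# outer_width**2 - hole_width**2 tiles; the counter tallies how many laminae
# share each tile count, and the answer is how many tile counts can be
# realised in 1 to 10 distinct ways.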
UpperCAmelCase__ : defaultdict = defaultdict(lowerCAmelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase__ : Tuple = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase__ : Union[str, Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowerCAmelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
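# Editorial note (parameter roles follow the original `padding_tensor` helper
# and are an assumption here): this pads or truncates a batch of
# variable-length rows to `sequence_length`, writing each row into a buffer
# pre-filled with the padding value on the side given by `padding_side`;
# tuple-valued padding gets an extra trailing dimension of size 2.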
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
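# Editorial note: the ASCII ranges checked below cover !-/ (33-47), :-@
# (58-64), [-` (91-96) and {-~ (123-126); anything else falls back to the
# Unicode "P" (punctuation) category check.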
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
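# Editorial note: the tokenizer pads token-level inputs above, but the
# entity-level labels must be padded by hand to the entity sequence length,
# honouring the tokenizer's padding side just like the token inputs.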
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a__ ( lowerCAmelCase : List[Any] ): # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def a__ ( lowerCAmelCase : Optional[int] ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class _lowercase :
'''simple docstring'''
_A = 42
_A = 42
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : List[Any] = [1, 2]
UpperCAmelCase__ : str = {"a": 1, "b": 2}
UpperCAmelCase__ : Any = {"a": [1, 2], "b": [3, 4]}
UpperCAmelCase__ : str = {"a": {"1": 1}, "b": 2}
UpperCAmelCase__ : Union[str, Any] = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Optional[Any] = 2
UpperCAmelCase__ : Optional[Any] = [2, 3]
UpperCAmelCase__ : Optional[Any] = {"a": 2, "b": 3}
UpperCAmelCase__ : Any = {"a": [2, 3], "b": [4, 5]}
UpperCAmelCase__ : int = {"a": {"1": 2}, "b": 3}
UpperCAmelCase__ : Optional[Any] = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = 2
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
UpperCAmelCase__ : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCAmelCase__ : Union[str, Any] = {"a": 2, "b": 0, "c": 2}
UpperCAmelCase__ : Optional[Any] = {
"a": np.eye(2 ).astype(__UpperCamelCase ),
"b": np.zeros(3 ).astype(__UpperCamelCase ),
"c": np.ones(2 ).astype(__UpperCamelCase ),
}
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__UpperCamelCase , __UpperCamelCase , map_numpy=__UpperCamelCase , num_proc=__UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__UpperCamelCase ): # can't pickle a local lambda
map_nested(lambda __UpperCamelCase : __UpperCamelCase + 1 , __UpperCamelCase , num_proc=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : int = {"a": 1, "b": 2}
UpperCAmelCase__ : int = {"a": 3, "b": 4}
UpperCAmelCase__ : List[str] = {"a": 5, "b": 6}
UpperCAmelCase__ : str = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
class _lowercase :
'''simple docstring'''
_A = 'bar'
UpperCAmelCase__ : List[Any] = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(__UpperCamelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict ):
'''simple docstring'''
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
UpperCAmelCase__ : int = {F"{i}": i for i in range(lowerCAmelCase )}
UpperCAmelCase__ : Dict = map_nested(lambda lowerCAmelCase : lowerCAmelCase + 10 , lowerCAmelCase , num_proc=lowerCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@require_tf
def lowerCAmelCase__ ( self )-> Tuple:
import tensorflow as tf
from tensorflow.keras import layers
UpperCAmelCase__ : Dict = layers.Dense(2 )
def gen_random_output():
UpperCAmelCase__ : str = tf.random.uniform((1, 3) )
return model(__UpperCamelCase ).numpy()
with temp_seed(42 , set_tensorflow=__UpperCamelCase ):
UpperCAmelCase__ : List[Any] = gen_random_output()
with temp_seed(42 , set_tensorflow=__UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = gen_random_output()
UpperCAmelCase__ : int = gen_random_output()
np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def lowerCAmelCase__ ( self )-> Optional[Any]:
import torch
def gen_random_output():
UpperCAmelCase__ : int = torch.nn.Linear(3 , 2 )
UpperCAmelCase__ : Dict = torch.rand(1 , 3 )
return model(__UpperCamelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=__UpperCamelCase ):
UpperCAmelCase__ : List[str] = gen_random_output()
with temp_seed(42 , set_pytorch=__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = gen_random_output()
UpperCAmelCase__ : List[str] = gen_random_output()
np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def lowerCAmelCase__ ( self )-> str:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCAmelCase__ : Tuple = gen_random_output()
with temp_seed(42 ):
UpperCAmelCase__ : Optional[int] = gen_random_output()
UpperCAmelCase__ : int = gen_random_output()
np.testing.assert_equal(__UpperCamelCase , __UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = NestedDataStructure(lowerCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = NestedDataStructure(lowerCAmelCase ).flatten()
assert output == expected_output
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = A(x=1 , y="foobar" )
UpperCAmelCase__ : Dict = {"x": 1, "y": "foobar"}
assert asdict(lowerCAmelCase ) == expected_output
UpperCAmelCase__ : Any = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
UpperCAmelCase__ : Optional[int] = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCAmelCase ) == expected_output
with pytest.raises(lowerCAmelCase ):
asdict([1, A(x=10 , y="foo" )] )
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
return text.split()
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def a__ ( ):
'''simple docstring'''
with Pool(2 ) as pool:
UpperCAmelCase__ : Dict = list(iflatmap_unordered(lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCAmelCase__ : int = list(iflatmap_unordered(lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCAmelCase__ : Optional[int] = []
for yield_time, content in iflatmap_unordered(
lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowerCAmelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCAmelCase ) == 4
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
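# Minimal usage sketch (editorial; `get_duration` as the decorator's public
# name is an assumption -- above it is defined as `a__`). Note the wrapped
# call returns its wall-clock duration in seconds, not the function's result:
#
# @get_duration
# def write_batch(features, path):
# ... # heavy work
# seconds = write_batch(my_features, "out.arrow")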
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
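# Editorial note: for each of the requested examples this builds one random
# value per feature -- random arrays for _ArrayXD and Sequence features (the
# latter shaped via `seq_shapes`), a fixed sentence for string Values and a
# small random integer for numeric Values -- and returns a list of
# (key, example) pairs.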
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
A__ : Union[str, Any] = logging.getLogger(__name__)
A__ : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
A__ : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
'''simple docstring'''
_A = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , )
_A = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
_A = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_A = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCAmelCase__ ( self )-> Optional[int]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class _lowercase :
'''simple docstring'''
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_A = field(default=lowerCAmelCase_ , metadata={'help': 'The input training data file (a text file).'} )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
_A = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
_A = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
_A = field(
default=lowerCAmelCase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
_A = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
_A = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
if self.train_file is not None:
UpperCAmelCase__ : Tuple = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCAmelCase__ : Any = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Any ):
'''simple docstring'''
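# Editorial note: the ref file is expected to be JSON lines, one list of
# whole-word boundary indices per dataset row; per the original run_mlm_wwm
# script these are attached as a "chinese_ref" column so that
# DataCollatorForWholeWordMask can mask whole words.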
with open(lowerCAmelCase , "r" , encoding="utf-8" ) as f:
UpperCAmelCase__ : Optional[Any] = [json.loads(lowerCAmelCase ) for line in f.read().splitlines() if (len(lowerCAmelCase ) > 0 and not line.isspace())]
assert len(lowerCAmelCase ) == len(lowerCAmelCase )
UpperCAmelCase__ : int = {c: dataset[c] for c in dataset.column_names}
UpperCAmelCase__ : Optional[Any] = refs
return Dataset.from_dict(lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase__ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase__ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCAmelCase__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
UpperCAmelCase__ : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
UpperCAmelCase__ : Dict = {}
if data_args.train_file is not None:
UpperCAmelCase__ : Dict = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase__ : str = data_args.validation_file
UpperCAmelCase__ : List[str] = data_args.train_file.split("." )[-1]
if extension == "txt":
UpperCAmelCase__ : List[Any] = "text"
UpperCAmelCase__ : str = load_dataset(lowerCAmelCase , data_files=lowerCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__ : List[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase )
elif model_args.model_name_or_path:
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
UpperCAmelCase__ : str = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase )
elif model_args.model_name_or_path:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
UpperCAmelCase__ : List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
UpperCAmelCase__ : Tuple = AutoModelForMaskedLM.from_config(lowerCAmelCase )
model.resize_token_embeddings(len(lowerCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCAmelCase__ : List[str] = datasets["train"].column_names
else:
UpperCAmelCase__ : List[str] = datasets["validation"].column_names
UpperCAmelCase__ : Dict = "text" if "text" in column_names else column_names[0]
UpperCAmelCase__ : List[str] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase : Tuple ):
# Remove empty lines
UpperCAmelCase__ : Dict = [line for line in examples["text"] if len(lowerCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=data_args.max_seq_length )
UpperCAmelCase__ : str = datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCAmelCase__ : Any = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCAmelCase__ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
# If we have ref files, we need to avoid them being removed by the trainer
UpperCAmelCase__ : Dict = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCAmelCase__ : Optional[int] = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCAmelCase__ : Tuple = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase__ : Dict = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase__ : List[str] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase__ : Union[str, Any] = model_args.model_name_or_path
else:
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : List[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase__ : int = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
UpperCAmelCase__ : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase__ : Optional[Any] = trainer.evaluate()
UpperCAmelCase__ : Union[str, Any] = math.exp(eval_output["eval_loss"] )
UpperCAmelCase__ : int = perplexity
UpperCAmelCase__ : List[Any] = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
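# Editorial note: `layers_to_copy` selects which teacher layers to keep; they
# are gathered into a temporary ModuleList and their weights are written into
# the student layers via load_state_dict, after asserting the counts match.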
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
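# Editorial example (values from LAYERS_TO_COPY above): distilling a 12-layer
# teacher into a 3-layer student copies teacher layers [0, 6, 11].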
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
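# Editorial example (values from LAYERS_TO_SUPERVISE above): a 3-layer student
# of a 12-layer teacher is supervised against teacher layers [3, 7, 11].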
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A__ : List[Any] = logging.getLogger(__name__)
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Dict = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=lowerCAmelCase , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=lowerCAmelCase , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=lowerCAmelCase , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=lowerCAmelCase , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase__ : Tuple = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase__ : Optional[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : Tuple = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase__ : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase__ : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : Any = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase__ : Tuple = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase__ : Tuple = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ : Dict = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase__ : Any = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase__ : int = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(lowerCAmelCase )} examples to process." )
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : Union[str, Any] = 1_0000
UpperCAmelCase__ : List[Any] = time.time()
for text in data:
UpperCAmelCase__ : List[str] = F"{bos} {text.strip()} {sep}"
UpperCAmelCase__ : Dict = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
rslt.append(lowerCAmelCase )
iter += 1
if iter % interval == 0:
UpperCAmelCase__ : int = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase__ : List[Any] = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(lowerCAmelCase )} examples processed." )
UpperCAmelCase__ : Optional[int] = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase__ : Union[str, Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase__ : int = [np.uintaa(lowerCAmelCase ) for d in rslt]
else:
UpperCAmelCase__ : Union[str, Any] = [np.intaa(lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(lowerCAmelCase , "wb" ) as handle:
pickle.dump(rslt_ , lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
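A minimal standalone sketch (names are illustrative) of the dtype decision made before pickling above: token ids fit in an unsigned 16-bit integer only when the vocabulary has fewer than 2**16 = 65536 entries, which halves the dump size relative to int32 for BERT-sized vocabularies.

import numpy as np

def pick_dtype(vocab_size: int):
    # uint16 covers ids 0..65535; anything larger needs a 32-bit integer.
    return np.uint16 if vocab_size < (1 << 16) else np.int32

assert pick_dtype(30522) is np.uint16    # bert-base-uncased-sized vocab
assert pick_dtype(250002) is np.int32    # a large multilingual vocab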
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Dict = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'blip_text_model'
def __init__( self , __UpperCamelCase=3_05_24 , __UpperCamelCase=7_68 , __UpperCamelCase=7_68 , __UpperCamelCase=30_72 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=8 , __UpperCamelCase=5_12 , __UpperCamelCase="gelu" , __UpperCamelCase=1E-12 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=3_05_22 , __UpperCamelCase=2 , __UpperCamelCase=0 , __UpperCamelCase=1_02 , __UpperCamelCase=True , __UpperCamelCase=True , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , sep_token_id=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : List[str] = encoder_hidden_size
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Tuple = projection_dim
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = is_decoder
UpperCAmelCase__ : str = use_cache
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase__ : Optional[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'blip_vision_model'
def __init__( self , __UpperCamelCase=7_68 , __UpperCamelCase=30_72 , __UpperCamelCase=5_12 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3_84 , __UpperCamelCase=16 , __UpperCamelCase="gelu" , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=1E-10 , **__UpperCamelCase , )-> List[str]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Optional[Any] = projection_dim
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = image_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Optional[int] = attention_dropout
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : List[str] = hidden_act
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
UpperCAmelCase__ : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'blip'
_A = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=5_12 , __UpperCamelCase=2.6592 , __UpperCamelCase=2_56 , **__UpperCamelCase , )-> Optional[Any]:
super().__init__(**__UpperCamelCase )
if text_config is None:
UpperCAmelCase__ : Dict = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase__ : Tuple = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
UpperCAmelCase__ : Dict = BlipTextConfig(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = BlipVisionConfig(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.vision_config.hidden_size
UpperCAmelCase__ : Optional[int] = projection_dim
UpperCAmelCase__ : List[Any] = logit_scale_init_value
UpperCAmelCase__ : Tuple = 1.0
UpperCAmelCase__ : List[Any] = 0.02
UpperCAmelCase__ : List[Any] = image_text_hidden_size
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : str = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Optional[Any] = self.text_config.to_dict()
UpperCAmelCase__ : Any = self.vision_config.to_dict()
UpperCAmelCase__ : Optional[int] = self.__class__.model_type
return output
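A self-contained sketch of the composite-config pattern implemented above, with illustrative names (the dump emits all three config classes under the same obfuscated name): a top-level config owns its sub-configs and can be rebuilt from their dicts.

from typing import Optional

class TextCfgSketch:
    def __init__(self, vocab_size: int = 30524):
        self.vocab_size = vocab_size

    def to_dict(self) -> dict:
        return {"vocab_size": self.vocab_size}

class ComboCfgSketch:
    def __init__(self, text_config: Optional[dict] = None):
        # A missing sub-config falls back to defaults, mirroring the config above.
        self.text_config = TextCfgSketch(**(text_config or {}))

    @classmethod
    def from_text_config(cls, text_cfg: TextCfgSketch) -> "ComboCfgSketch":
        return cls(text_config=text_cfg.to_dict())

print(ComboCfgSketch.from_text_config(TextCfgSketch(100)).text_config.vocab_size)  # 100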
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : str=None ):
'''simple docstring'''
UpperCAmelCase__ : str = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = True, True
UpperCAmelCase__ : Dict = dfs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return path
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : Optional[Any] = -1
for i in range(lowerCAmelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
UpperCAmelCase__ : Dict = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : int = check_circuit_or_path(lowerCAmelCase , lowerCAmelCase )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
UpperCAmelCase__ : List[Any] = 1
if check == 2:
UpperCAmelCase__ : Any = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
UpperCAmelCase__ : Tuple = dfs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
UpperCAmelCase__ : List[str] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
UpperCAmelCase__ : Dict = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
UpperCAmelCase__ : Dict = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
UpperCAmelCase__ : Any = {
1: [],
2: []
# all degrees are zero
}
UpperCAmelCase__ : Tuple = 10
check_euler(lowerCAmelCase , lowerCAmelCase )
check_euler(lowerCAmelCase , lowerCAmelCase )
check_euler(lowerCAmelCase , lowerCAmelCase )
check_euler(lowerCAmelCase , lowerCAmelCase )
check_euler(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
main()
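A quick standalone restatement of the degree-parity rule that `check_circuit_or_path` relies on — an Euler circuit needs zero odd-degree vertices, an Euler path exactly two (toy graph for illustration):

triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}        # every vertex has degree 2
odd_vertices = [v for v, nbrs in triangle.items() if len(nbrs) % 2 == 1]
print(len(odd_vertices))  # 0 -> the graph has an Euler circuit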
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
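    # Illustrative invocation (placeholders in <...>; flags taken from the
    # argparse definitions above — the launcher supplies --local_rank):
    # python -m torch.distributed.launch --nproc_per_node=<num_gpus> run_distributed_eval.py \
    #     --model_name <model> --data_dir <data_dir> --save_dir <save_dir> --type_path test --bs 8 --fp16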
run_generate()
| 660 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : Tuple = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : Union[str, Any] = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ : str = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase__ : Optional[Any] = numpy_to_pil(lowerCAmelCase )
return images
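# Worked check of the rescaling above: images normalised to [-1, 1] map back
# to [0, 1] via x / 2 + 0.5, i.e. -1.0 -> 0.0, 0.0 -> 0.5, 1.0 -> 1.0,
# before the uint8 conversion in numpy_to_pil (0 and 255 respectively).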
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase__ : str = images[None, ...]
UpperCAmelCase__ : str = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase__ : Any = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase__ : List[str] = [Image.fromarray(lowerCAmelCase ) for image in images]
return pil_images
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
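A worked trace of Brian Kernighan's trick used above — `n &= n - 1` clears exactly the lowest set bit each iteration, so the loop count equals the pop count (standalone sketch):

n = 0b10110          # 22, which has three set bits
trace = []
while n:
    n &= n - 1       # 0b10110 -> 0b10100 -> 0b10000 -> 0b00000
    trace.append(bin(n))
print(trace)         # ['0b10100', '0b10000', '0b0'] -> 3 iterations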
| 660 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
A__ : Tuple = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class _lowercase ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , __UpperCamelCase = " " )-> Optional[Any]:
UpperCAmelCase__ : Dict = sentence_delimiter
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return list(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = []
for sent_idx, sentence in enumerate(__UpperCamelCase ):
chars.extend(self.process_string(__UpperCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
A__ : int = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
A__ : List[Any] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
A__ : Optional[int] = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A__ : List[Any] = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
A__ : Tuple = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False )-> Tuple:
if concatenate_texts:
return jiwer.compute_measures(
__UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , )["wer"]
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[Any] = 0
for prediction, reference in zip(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = jiwer.compute_measures(
__UpperCamelCase , __UpperCamelCase , truth_transform=__UpperCamelCase , hypothesis_transform=__UpperCamelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
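A worked instance of the CER formula from the docstring, with made-up edit counts purely for illustration:

S, D, I, C = 2, 1, 0, 17    # substitutions, deletions, insertions, correct chars
N = S + D + C               # reference length: 20 characters
print((S + D + I) / N)      # 0.15 -> 15% character error rate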
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__ : List[Any] = logging.get_logger(__name__)
# General docstring
A__ : Tuple = """RegNetConfig"""
# Base docstring
A__ : Tuple = """facebook/regnet-y-040"""
A__ : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
A__ : str = """facebook/regnet-y-040"""
A__ : Tuple = """tabby, tabby cat"""
A__ : Dict = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = 3 , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = "relu" , **__UpperCamelCase , )-> Optional[Any]:
super().__init__(**__UpperCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCAmelCase__ : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCAmelCase__ : List[str] = tf.keras.layers.ConvaD(
filters=__UpperCamelCase , kernel_size=__UpperCamelCase , strides=__UpperCamelCase , padding="VALID" , groups=__UpperCamelCase , use_bias=__UpperCamelCase , name="convolution" , )
UpperCAmelCase__ : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
UpperCAmelCase__ : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : str = self.convolution(self.padding(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = self.normalization(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = config.num_channels
UpperCAmelCase__ : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Any = shape_list(__UpperCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCAmelCase__ : Optional[Any] = tf.transpose(__UpperCamelCase , perm=(0, 2, 3, 1) )
UpperCAmelCase__ : Any = self.embedder(__UpperCamelCase )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase = 2 , **__UpperCamelCase )-> Dict:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : int = tf.keras.layers.ConvaD(
filters=__UpperCamelCase , kernel_size=1 , strides=__UpperCamelCase , use_bias=__UpperCamelCase , name="convolution" )
UpperCAmelCase__ : List[str] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> tf.Tensor:
return self.normalization(self.convolution(__UpperCamelCase ) , training=__UpperCamelCase )
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Dict:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__UpperCamelCase , name="pooler" )
UpperCAmelCase__ : Any = [
tf.keras.layers.ConvaD(filters=__UpperCamelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__UpperCamelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
UpperCAmelCase__ : Optional[int] = self.pooler(__UpperCamelCase )
for layer_module in self.attention:
UpperCAmelCase__ : List[Any] = layer_module(__UpperCamelCase )
UpperCAmelCase__ : Tuple = hidden_state * pooled
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , **__UpperCamelCase )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = in_channels != out_channels or stride != 1
UpperCAmelCase__ : str = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Optional[int] = (
TFRegNetShortCut(__UpperCamelCase , stride=__UpperCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCAmelCase__ : Any = [
TFRegNetConvLayer(__UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase , name="layer.2" ),
]
UpperCAmelCase__ : Optional[Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Tuple = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : Optional[int] = layer_module(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : Union[str, Any] = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , **__UpperCamelCase )-> List[Any]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : int = in_channels != out_channels or stride != 1
UpperCAmelCase__ : Optional[Any] = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Union[str, Any] = (
TFRegNetShortCut(__UpperCamelCase , stride=__UpperCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
UpperCAmelCase__ : Optional[Any] = [
TFRegNetConvLayer(__UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase , name="layer.3" ),
]
UpperCAmelCase__ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : List[Any] = hidden_state
for layer_module in self.layers:
UpperCAmelCase__ : Optional[int] = layer_module(__UpperCamelCase )
UpperCAmelCase__ : Dict = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : Optional[Any] = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 2 , **__UpperCamelCase )-> int:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Dict = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
UpperCAmelCase__ : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , name="layers.0" ),
*[layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
for layer_module in self.layers:
UpperCAmelCase__ : List[Any] = layer_module(__UpperCamelCase )
return hidden_state
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> Dict:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
UpperCAmelCase__ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__UpperCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , depth=__UpperCamelCase , name=F"stages.{i+1}" ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = True )-> TFBaseModelOutputWithNoAttention:
UpperCAmelCase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Any = hidden_states + (hidden_state,)
UpperCAmelCase__ : int = stage_module(__UpperCamelCase )
if output_hidden_states:
UpperCAmelCase__ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__UpperCamelCase , hidden_states=__UpperCamelCase )
@keras_serializable
class _lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
_A = RegNetConfig
def __init__( self , __UpperCamelCase , **__UpperCamelCase )-> Dict:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Dict = config
UpperCAmelCase__ : int = TFRegNetEmbeddings(__UpperCamelCase , name="embedder" )
UpperCAmelCase__ : Dict = TFRegNetEncoder(__UpperCamelCase , name="encoder" )
UpperCAmelCase__ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__UpperCamelCase , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
UpperCAmelCase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : str = self.embedder(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Any = self.encoder(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = encoder_outputs[0]
UpperCAmelCase__ : Dict = self.pooler(__UpperCamelCase )
# Change to NCHW output format have uniformity in the modules
UpperCAmelCase__ : Union[str, Any] = tf.transpose(__UpperCamelCase , perm=(0, 3, 1, 2) )
UpperCAmelCase__ : Optional[Any] = tf.transpose(__UpperCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCAmelCase__ : Dict = tuple([tf.transpose(__UpperCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCamelCase , pooler_output=__UpperCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = RegNetConfig
_A = 'regnet'
_A = 'pixel_values'
@property
def lowerCAmelCase__ ( self )-> List[str]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
A__ : Tuple = R"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
A__ : Union[str, Any] = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , lowerCAmelCase_ , )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
super().__init__(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : Tuple = TFRegNetMainLayer(__UpperCamelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
UpperCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Any = self.regnet(
pixel_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase , training=__UpperCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase_ , )
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> Tuple:
super().__init__(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : Tuple = config.num_labels
UpperCAmelCase__ : Optional[Any] = TFRegNetMainLayer(__UpperCamelCase , name="regnet" )
# classification head
UpperCAmelCase__ : Optional[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
UpperCAmelCase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Optional[int] = self.regnet(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : Any = self.classifier[0](__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.classifier[1](__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = None if labels is None else self.hf_compute_loss(labels=__UpperCamelCase , logits=__UpperCamelCase )
if not return_dict:
UpperCAmelCase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states )
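A minimal numpy sketch (illustrative, not the model code) of the squeeze-and-excitation gating used by the SE layer defined earlier in this file: global-average-pool each channel, project down to C // 4 and back up, then rescale every channel by a sigmoid gate.

import numpy as np

def se_gate(x, w_reduce, w_expand):
    # x: (H, W, C); w_reduce: (C, C // 4); w_expand: (C // 4, C)
    pooled = x.mean(axis=(0, 1))                       # squeeze: (C,)
    hidden = np.maximum(pooled @ w_reduce, 0.0)        # 1x1 conv + ReLU
    gate = 1.0 / (1.0 + np.exp(-(hidden @ w_expand)))  # 1x1 conv + sigmoid
    return x * gate                                    # excite: broadcast over H, W

x = np.ones((2, 2, 4))
print(se_gate(x, np.eye(4)[:, :1], np.ones((1, 4))).shape)  # (2, 2, 4)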
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
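A standalone restatement with readable names (this dump emits every function under the same obfuscated name, so only the last definition would survive at import time):

def manhattan_distance(point_a: list, point_b: list) -> float:
    # Sum of absolute per-coordinate differences.
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [2, 2]))  # |1-2| + |1-2| = 2.0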
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : Tuple = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'gpt_neo'
_A = ['past_key_values']
_A = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , __UpperCamelCase=5_02_57 , __UpperCamelCase=20_48 , __UpperCamelCase=20_48 , __UpperCamelCase=24 , __UpperCamelCase=[[["global", "local"], 12]] , __UpperCamelCase=16 , __UpperCamelCase=None , __UpperCamelCase=2_56 , __UpperCamelCase="gelu_new" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase=1E-5 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=5_02_56 , __UpperCamelCase=5_02_56 , **__UpperCamelCase , )-> List[Any]:
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_layers
UpperCAmelCase__ : Optional[Any] = num_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : str = window_size
UpperCAmelCase__ : Union[str, Any] = activation_function
UpperCAmelCase__ : Optional[int] = resid_dropout
UpperCAmelCase__ : str = embed_dropout
UpperCAmelCase__ : str = attention_dropout
UpperCAmelCase__ : Optional[Any] = classifier_dropout
UpperCAmelCase__ : str = layer_norm_epsilon
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = use_cache
UpperCAmelCase__ : Dict = bos_token_id
UpperCAmelCase__ : Tuple = eos_token_id
UpperCAmelCase__ : List[Any] = attention_types
UpperCAmelCase__ : Dict = self.expand_attention_types_params(__UpperCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
F"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
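# Expansion sketch using this file's defaults: attention_types=[[["global", "local"], 12]]
# repeats ["global", "local"] twelve times, giving 24 alternating layer types to
# match the default num_layers=24.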
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : str ):
'''simple docstring'''
import torch
UpperCAmelCase__ : str = input.size()
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = shape[dimension]
UpperCAmelCase__ : Dict = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = torch.div(sizedim - size , lowerCAmelCase , rounding_mode="floor" ) + 1
UpperCAmelCase__ : int = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
UpperCAmelCase__ : int = [slice(lowerCAmelCase )] * rank
UpperCAmelCase__ : List[Any] = indices
UpperCAmelCase__ : List[Any] = input[s]
UpperCAmelCase__ : List[Any] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = torch.arange(1 , lowerCAmelCase )
UpperCAmelCase__ : int = torch.remainder(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Any = remainders == 0
UpperCAmelCase__ : Any = candidates[divisor_indices]
UpperCAmelCase__ : Union[str, Any] = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode="floor" )
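# Worked example (assuming the call uses this config's defaults, seq_length=2048
# and window_size=256): the divisors of 2048 below 256 run 1, 2, 4, ..., 128, so
# the helper returns block length 128 and 2048 // 128 = 16 blocks.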
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
UpperCAmelCase__ : Union[str, Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
UpperCAmelCase__ : List[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
UpperCAmelCase__ : int = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCAmelCase__ ( self )-> int:
return self._config.num_heads
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , )-> Mapping[str, Any]:
UpperCAmelCase__ : List[str] = super(__UpperCamelCase , self ).generate_dummy_inputs(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase__ : str = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : Union[str, Any] = seqlen + 2
UpperCAmelCase__ : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase__ : Optional[int] = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(self.num_layers )
]
UpperCAmelCase__ : int = common_inputs["attention_mask"]
if self.use_past:
UpperCAmelCase__ : Any = ordered_inputs["attention_mask"].dtype
UpperCAmelCase__ : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase__ ( self )-> int:
return 13
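# (assumption: in the upstream GPT-Neo ONNX config the final property above is
# the default ONNX opset, which is why it pins the constant 13)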
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
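# For example, is_prime(29) rules out 2 and 3, then only probes the 6k +/- 1
# candidates 5 and 7 (i and i + 2 with sqrt(29) ~ 5.4) before returning True.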
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import re
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase ) ) != len(lowerCAmelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
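# For instance, the complement of "ATCG" is "TAGC", while an input such as
# "AT?G" fails the [ATCG] check and raises ValueError("Invalid Strand").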
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
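        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91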
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
A__ : Tuple = """RegNetConfig"""
# Base docstring
A__ : List[Any] = """facebook/regnet-y-040"""
A__ : Union[str, Any] = [1, 1_088, 7, 7]
# Image classification docstring
A__ : List[Any] = """facebook/regnet-y-040"""
A__ : List[str] = """tabby, tabby cat"""
A__ : List[Any] = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 3 , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = "relu" , )-> Any:
super().__init__()
UpperCAmelCase__ : Union[str, Any] = nn.Convad(
__UpperCamelCase , __UpperCamelCase , kernel_size=__UpperCamelCase , stride=__UpperCamelCase , padding=kernel_size // 2 , groups=__UpperCamelCase , bias=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = nn.BatchNormad(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : List[str] = self.convolution(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.normalization(__UpperCamelCase )
UpperCAmelCase__ : Any = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> str:
super().__init__()
UpperCAmelCase__ : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
UpperCAmelCase__ : Optional[int] = config.num_channels
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Dict = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
UpperCAmelCase__ : Optional[Any] = self.embedder(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : Optional[Any] = nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , stride=__UpperCamelCase , bias=__UpperCamelCase )
UpperCAmelCase__ : int = nn.BatchNormad(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tensor:
UpperCAmelCase__ : List[str] = self.convolution(__UpperCamelCase )
UpperCAmelCase__ : str = self.normalization(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
super().__init__()
UpperCAmelCase__ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
UpperCAmelCase__ : Union[str, Any] = nn.Sequential(
nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.Sigmoid() , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
# b c h w -> b c 1 1
UpperCAmelCase__ : Union[str, Any] = self.pooler(__UpperCamelCase )
UpperCAmelCase__ : Any = self.attention(__UpperCamelCase )
UpperCAmelCase__ : List[str] = hidden_state * attention
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 )-> List[str]:
super().__init__()
UpperCAmelCase__ : Tuple = in_channels != out_channels or stride != 1
UpperCAmelCase__ : List[str] = max(1 , out_channels // config.groups_width )
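        # one group per `groups_width` channels, clamped to at least a single group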
UpperCAmelCase__ : str = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : List[Any] = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
UpperCAmelCase__ : str = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[Any] = hidden_state
UpperCAmelCase__ : Dict = self.layer(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : Tuple = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 )-> str:
super().__init__()
UpperCAmelCase__ : Any = in_channels != out_channels or stride != 1
UpperCAmelCase__ : str = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Union[str, Any] = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : List[str] = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetSELayer(__UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
UpperCAmelCase__ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Any = hidden_state
UpperCAmelCase__ : Optional[Any] = self.layer(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : str = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 2 , )-> str:
super().__init__()
UpperCAmelCase__ : Optional[int] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
UpperCAmelCase__ : Any = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , ) , *[layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for _ in range(depth - 1 )] , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = self.layers(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Tuple:
super().__init__()
UpperCAmelCase__ : int = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase__ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__UpperCamelCase , config.depths[1:] ):
self.stages.append(RegNetStage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , depth=__UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = True )-> BaseModelOutputWithNoAttention:
UpperCAmelCase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Tuple = hidden_states + (hidden_state,)
UpperCAmelCase__ : Optional[Any] = stage_module(__UpperCamelCase )
if output_hidden_states:
UpperCAmelCase__ : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCamelCase , hidden_states=__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = RegNetConfig
_A = 'regnet'
_A = 'pixel_values'
_A = True
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
if isinstance(__UpperCamelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(__UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> str:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = value
A__ : Union[str, Any] = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
A__ : List[str] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Tuple:
super().__init__(__UpperCamelCase )
UpperCAmelCase__ : List[str] = config
UpperCAmelCase__ : Dict = RegNetEmbeddings(__UpperCamelCase )
UpperCAmelCase__ : List[str] = RegNetEncoder(__UpperCamelCase )
UpperCAmelCase__ : List[str] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None )-> BaseModelOutputWithPoolingAndNoAttention:
UpperCAmelCase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Optional[Any] = self.embedder(__UpperCamelCase )
UpperCAmelCase__ : Any = self.encoder(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_outputs[0]
UpperCAmelCase__ : Tuple = self.pooler(__UpperCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCamelCase , pooler_output=__UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> List[Any]:
super().__init__(__UpperCamelCase )
UpperCAmelCase__ : str = config.num_labels
UpperCAmelCase__ : List[Any] = RegNetModel(__UpperCamelCase )
# classification head
UpperCAmelCase__ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
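        # Flatten collapses the pooled (batch, C, 1, 1) features to (batch, C)
        # before the linear projection to `num_labels`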
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> ImageClassifierOutputWithNoAttention:
UpperCAmelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Any = self.regnet(__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : Optional[Any] = self.classifier(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase__ : Optional[int] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase__ : str = "single_label_classification"
else:
UpperCAmelCase__ : Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase__ : Tuple = MSELoss()
if self.num_labels == 1:
UpperCAmelCase__ : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase__ : int = loss_fct(__UpperCamelCase , __UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss()
UpperCAmelCase__ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase__ : Optional[Any] = BCEWithLogitsLoss()
UpperCAmelCase__ : Dict = loss_fct(__UpperCamelCase , __UpperCamelCase )
if not return_dict:
UpperCAmelCase__ : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states )
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
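        # posterior variance from Eq. (7): beta~_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t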
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
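        # in symbols: mu_t = (sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t)) * x_0
        #                  + (sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t)) * x_t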
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 1 |
"""simple docstring"""
import numpy as np
def a__ ( lowerCAmelCase : np.array ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
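# Algebraically 2 / (1 + exp(-2x)) - 1 == tanh(x), so (assuming a float array
# input) the output should match np.tanh(vector) up to rounding.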
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
    _A = 'hf-legacy' # "hf://" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=None )-> Tuple:
UpperCAmelCase__ : Tuple = np.random.default_rng(__UpperCamelCase )
UpperCAmelCase__ : str = length
UpperCAmelCase__ : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase__ : str = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self )-> int:
return self.length
def __getitem__( self , __UpperCamelCase )-> Any:
return {"x": self.x[i], "y": self.y[i]}
class _lowercase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=False )-> Dict:
super().__init__()
UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase__ : Optional[Any] = True
def lowerCAmelCase__ ( self , __UpperCamelCase=None )-> Union[str, Any]:
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase__ : Any = False
return x * self.a[0] + self.b[0]
class _lowercase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=False )-> Tuple:
super().__init__()
UpperCAmelCase__ : Any = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() )
UpperCAmelCase__ : str = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() )
UpperCAmelCase__ : Tuple = True
def lowerCAmelCase__ ( self , __UpperCamelCase=None )-> Optional[Any]:
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase__ : Dict = False
return x * self.a + self.b
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase__ : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
UpperCAmelCase__ : Any = load_dataset("csv" , data_files=lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = datasets["train"].unique("label" )
UpperCAmelCase__ : str = {v: i for i, v in enumerate(lowerCAmelCase )}
def tokenize_function(lowerCAmelCase : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ : List[str] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
if "label" in examples:
UpperCAmelCase__ : Tuple = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase__ : Dict = datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
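    # note: the helper's `batch_size` parameter is accepted but unused in this
    # snippet; the loaders below hardcode batch sizes of 2 (train) and 1 (eval)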
UpperCAmelCase__ : Tuple = DataLoader(tokenized_datasets["train"] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=2 )
UpperCAmelCase__ : Union[str, Any] = DataLoader(tokenized_datasets["validation"] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> List[str]:
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = 13
UpperCAmelCase__ : Union[str, Any] = 7
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = 99
UpperCAmelCase__ : Any = 3_84
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 37
UpperCAmelCase__ : Union[str, Any] = "gelu"
UpperCAmelCase__ : Dict = 0.1
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Optional[int] = 5_12
UpperCAmelCase__ : List[str] = 16
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : List[str] = 0.02
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : str = 1_28
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : List[str] = 9
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : Any = None
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : str = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = TFConvBertModel(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : str = model(__UpperCamelCase )
UpperCAmelCase__ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Dict = TFConvBertForMaskedLM(config=__UpperCamelCase )
UpperCAmelCase__ : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : Tuple = TFConvBertForSequenceClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : List[str] = self.num_choices
UpperCAmelCase__ : str = TFConvBertForMultipleChoice(config=__UpperCamelCase )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Dict = TFConvBertForTokenClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : int = TFConvBertForQuestionAnswering(config=__UpperCamelCase )
UpperCAmelCase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[str]:
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def lowerCAmelCase__ ( self )-> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = True
if hasattr(__UpperCamelCase , "use_cache" ):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : str = len(model(__UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase , saved_model=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = os.path.join(__UpperCamelCase , "saved_model" , "1" )
UpperCAmelCase__ : int = tf.keras.models.load_model(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : Optional[int] = outputs["encoder_hidden_states"]
UpperCAmelCase__ : Tuple = outputs["encoder_attentions"]
else:
UpperCAmelCase__ : List[Any] = outputs["hidden_states"]
UpperCAmelCase__ : Union[str, Any] = outputs["attentions"]
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Union[str, Any] = getattr(self.model_tester , "key_length" , __UpperCamelCase )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
def check_decoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : str = len(__UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[int] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = model_class(__UpperCamelCase )
UpperCAmelCase__ : Any = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : int = model(__UpperCamelCase )[0]
UpperCAmelCase__ : Any = [1, 6, 7_68]
self.assertEqual(output.shape , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
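        # The slice comparison above is the usual integration-test pattern: run the real
        # checkpoint once, pin a small 3x3 corner of the output, and compare within a
        # loose tolerance. The same check in miniature (shapes assumed from above):
        #
        #   hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]   # (1, 6, 768)
        #   tf.debugging.assert_near(hidden[:, :3, :3], expected_slice, atol=1e-4)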
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    elif lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
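    # A few hand-checkable examples (worked out from the binary expansions):
    # 25 -> 0b11001 has three set bits, 36 -> 0b100100 has two.
    assert a__(25) == 3
    assert a__(36) == 2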
| 660 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['image_processor', 'tokenizer']
_A = 'LayoutLMv2ImageProcessor'
_A = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> Any:
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCamelCase , )
UpperCAmelCase__ : str = kwargs.pop("feature_extractor" )
UpperCAmelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
UpperCAmelCase__ : int = self.image_processor(images=__UpperCamelCase , return_tensors=__UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase__ : int = features["words"]
UpperCAmelCase__ : Optional[Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
# add pixel values
UpperCAmelCase__ : Optional[int] = features.pop("pixel_values" )
if return_overflowing_tokens is True:
UpperCAmelCase__ : Any = self.get_overflowing_images(__UpperCamelCase , encoded_inputs["overflow_to_sample_mapping"] )
UpperCAmelCase__ : int = images
return encoded_inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase__ : Dict = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCamelCase )} and {len(__UpperCamelCase )}" )
return images_with_overflow
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , )
return self.image_processor_class
@property
def lowerCAmelCase__ ( self )-> Dict:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , )
return self.image_processor
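# A minimal usage sketch (hypothetical wiring: "microsoft/layoutxlm-base" is a real
# hub checkpoint, but pairing it with the exact class above is an assumption):
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   sorted(encoding.keys())  # input_ids, bbox, attention_mask, image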
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations ( do_eager_mode : bool , use_xla : bool ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids ( batch_size : int , sequence_length : int , vocab_size : int ):
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
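# A minimal usage sketch (assumed names: the class above matches transformers'
# TensorFlowBenchmark, judging by the TensorFlowBenchmarkArguments import):
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   benchmark = TensorFlowBenchmark(args)
#   results = benchmark.run()  # speed and memory, via the _measure_* methods above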
| 660 | 1 |
"""simple docstring"""
import os
SYMBOLS = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals ( lowerCAmelCase : str ):
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(lowerCAmelCase ) - 1:
        current_value = SYMBOLS[lowerCAmelCase[index]]
        next_value = SYMBOLS[lowerCAmelCase[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[lowerCAmelCase[index]]
    return total_value
def generate_roman_numerals ( lowerCAmelCase : int ):
    '''simple docstring'''
    num = lowerCAmelCase
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
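# Hand-checkable round trips for the two helpers above:
#   parse_roman_numerals("XLIX") == 49 and generate_roman_numerals(49) == "XLIX"
#   parse_roman_numerals("IIIIIIIII") == 9, which re-encodes as the shorter "IX"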
def solution ( lowerCAmelCase : str = "/p089_roman.txt" ):
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__ ) + lowerCAmelCase ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
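# Rough illustration of what the lazy module defers (module path assumed from the
# four-dots-deep relative import above):
#
#   import transformers.models.deprecated.tapex as tapex  # cheap: nothing imported yet
#   tapex.TapexTokenizer  # first attribute access triggers the real import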
| 660 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( lowerCAmelCase : Union[str, Any] ):
    '''simple docstring'''
    image = lowerCAmelCase
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> Optional[int]:
super().__init__()
self.register_modules(vqvae=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 1_00 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , )-> Union[Tuple, ImagePipelineOutput]:
if isinstance(__UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase__ : Dict = 1
elif isinstance(__UpperCamelCase , torch.Tensor ):
UpperCAmelCase__ : Optional[int] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__UpperCamelCase )}" )
if isinstance(__UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase__ : str = preprocess(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase__ : Union[str, Any] = next(self.unet.parameters() ).dtype
UpperCAmelCase__ : List[Any] = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = image.to(device=self.device , dtype=__UpperCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__UpperCamelCase , device=self.device )
UpperCAmelCase__ : List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ : Optional[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ : Optional[int] = {}
if accepts_eta:
UpperCAmelCase__ : List[str] = eta
for t in self.progress_bar(__UpperCamelCase ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase__ : Any = torch.cat([latents, image] , dim=1 )
UpperCAmelCase__ : Optional[Any] = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
UpperCAmelCase__ : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase__ : List[Any] = self.vqvae.decode(__UpperCamelCase ).sample
UpperCAmelCase__ : List[Any] = torch.clamp(__UpperCamelCase , -1.0 , 1.0 )
UpperCAmelCase__ : Any = image / 2 + 0.5
UpperCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : Any = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
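# A minimal usage sketch (checkpoint name assumed; the components above - a VQModel,
# a UNet2DModel, and a low-resolution image concatenated into the latents - match
# diffusers' LDM super-resolution pipeline):
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]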
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
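# The last two methods above are exact inverses: for any embedding tensor e,
# ((e - self.mean) / self.std) * self.std + self.mean == e up to floating-point
# error, so round-tripping an embedding through both is lossless.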
| 660 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = IFImgaImgSuperResolutionPipeline
_A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
_A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
_A = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase__ ( self )-> Any:
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> List[str]:
if str(__UpperCamelCase ).startswith("mps" ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(__UpperCamelCase )
else:
UpperCAmelCase__ : str = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
UpperCAmelCase__ : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self )-> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase__ ( self )-> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCAmelCase__ ( self )-> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self )-> Tuple:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self )-> List[Any]:
self._test_save_load_local()
def lowerCAmelCase__ ( self )-> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch ( gpta_checkpoint_path : str , gpta_config_file : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
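# Example invocation (script name and paths are placeholders; the flag names come
# from the argparse definitions above):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json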
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
def convert_dialogpt_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
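# What the key rename amounts to, in miniature (hypothetical state dict):
#
#   d = {"lm_head.decoder.weight": w}  # DialoGPT's original key
#   d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
#   # -> {"lm_head.weight": w}, the key transformers' GPT-2 LM head expects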
| 660 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
    merges_file = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 660 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers ( lowerCAmelCase : int ):
    '''simple docstring'''
    is_prime = [True] * lowerCAmelCase
    for i in range(2 , isqrt(lowerCAmelCase - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , lowerCAmelCase , i ):
                is_prime[j] = False
    return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def solution ( lowerCAmelCase : int = 10**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(lowerCAmelCase // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= lowerCAmelCase:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
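# Why the two-pointer scan works: the primes are sorted, so once
# prime_numbers[left] * prime_numbers[right] drops below the bound, every prime
# between left and right also keeps the product below it, giving right - left + 1
# valid p*q composites (p <= q) in one step. Toy check: solution(30) uses the
# primes up to 15, [2, 3, 5, 7, 11, 13], and counts the ten composites
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.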
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A__ : Dict = logging.get_logger(__name__)
A__ : Optional[int] = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
A__ : List[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
A__ : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'whisper'
_A = ['past_key_values']
_A = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCamelCase=5_18_65 , __UpperCamelCase=80 , __UpperCamelCase=6 , __UpperCamelCase=4 , __UpperCamelCase=6 , __UpperCamelCase=4 , __UpperCamelCase=15_36 , __UpperCamelCase=15_36 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=5_02_57 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="gelu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=False , __UpperCamelCase=15_00 , __UpperCamelCase=4_48 , __UpperCamelCase=5_02_56 , __UpperCamelCase=5_02_56 , __UpperCamelCase=5_02_56 , __UpperCamelCase=None , __UpperCamelCase=[2_20, 5_02_56] , __UpperCamelCase=False , __UpperCamelCase=2_56 , __UpperCamelCase=False , __UpperCamelCase=0.05 , __UpperCamelCase=10 , __UpperCamelCase=2 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=0 , __UpperCamelCase=7 , **__UpperCamelCase , )-> int:
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Optional[int] = num_mel_bins
UpperCAmelCase__ : Dict = d_model
UpperCAmelCase__ : List[Any] = encoder_layers
UpperCAmelCase__ : List[str] = encoder_attention_heads
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase__ : Dict = decoder_ffn_dim
UpperCAmelCase__ : List[Any] = encoder_ffn_dim
UpperCAmelCase__ : Union[str, Any] = dropout
UpperCAmelCase__ : Union[str, Any] = attention_dropout
UpperCAmelCase__ : Tuple = activation_dropout
UpperCAmelCase__ : Optional[int] = activation_function
UpperCAmelCase__ : Tuple = init_std
UpperCAmelCase__ : List[str] = encoder_layerdrop
UpperCAmelCase__ : Optional[int] = decoder_layerdrop
UpperCAmelCase__ : Optional[int] = use_cache
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ : Tuple = max_source_positions
UpperCAmelCase__ : Union[str, Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ : List[Any] = classifier_proj_size
UpperCAmelCase__ : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : Any = apply_spec_augment
UpperCAmelCase__ : Tuple = mask_time_prob
UpperCAmelCase__ : int = mask_time_length
UpperCAmelCase__ : Tuple = mask_time_min_masks
UpperCAmelCase__ : Optional[int] = mask_feature_prob
UpperCAmelCase__ : str = mask_feature_length
UpperCAmelCase__ : str = mask_feature_min_masks
UpperCAmelCase__ : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , suppress_tokens=__UpperCamelCase , begin_suppress_tokens=__UpperCamelCase , **__UpperCamelCase , )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
UpperCAmelCase__ : List[str] = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase__ : List[str] = {0: "batch"}
else:
UpperCAmelCase__ : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
return common_inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = 2_20_50 , __UpperCamelCase = 5.0 , __UpperCamelCase = 2_20 , )-> Mapping[str, Any]:
UpperCAmelCase__ : Optional[int] = OrderedDict()
UpperCAmelCase__ : List[str] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__UpperCamelCase , framework=__UpperCamelCase , sampling_rate=__UpperCamelCase , time_duration=__UpperCamelCase , frequency=__UpperCamelCase , )
UpperCAmelCase__ : str = encoder_inputs["input_features"].shape[2]
UpperCAmelCase__ : str = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase__ : Tuple = super().generate_dummy_inputs(
preprocessor.tokenizer , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : int = encoder_inputs.pop("input_features" )
UpperCAmelCase__ : Optional[int] = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
UpperCAmelCase__ : str = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-3
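# A minimal configuration sketch (values assumed; the first class above corresponds
# to transformers' WhisperConfig, per its 'whisper' model_type):
#
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   config.num_mel_bins  # 80 log-mel input channels, the default set in __init__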
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor ( sequences , padding_value , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        # truncate to sequence_length, then write at the start ("right" padding)
        # or at the end ("left" padding) of the pre-filled row
        kept = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(kept ), :2] = kept
            else:
                out_tensor[i, : len(kept )] = kept
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, sequence_length - len(kept ) :, :2] = kept
            else:
                out_tensor[i, sequence_length - len(kept ) :] = kept
    return out_tensor.tolist()
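# Toy runs of padding_tensor above (worked by hand):
#   padding_tensor([[1, 2], [3]], 0, "right", 3)      -> [[1, 2, 0], [3, 0, 0]]
#   padding_tensor([[(1, 1)]], (-1, -1), "right", 2)  -> [[[1, 1], [-1, -1]]]
# A tuple padding value adds a trailing size-2 axis, matching the (start, end)
# entity-span pairs the collator below pads the same way.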
def a__ ( lowerCAmelCase : Optional[int] ):
    '''simple docstring'''
    cp = ord(lowerCAmelCase )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(lowerCAmelCase )
    if cat.startswith("P" ):
        return True
    return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
        UpperCAmelCase__ : Optional[int] = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch
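# --- Illustrative sketch (added; not part of the original module) ---
# Demonstrates the padding scheme the collator above relies on: variable-length
# label sequences are right-padded with label_pad_token_id (-100) so that loss
# functions ignore the padded positions. Pure Python, no tokenizer required.
if __name__ == "__main__":
    label_pad_token_id = -100
    labels = [[1, 2], [3, 4, 5], [6]]
    sequence_length = max(len(label) for label in labels)
    padded = [list(label) + [label_pad_token_id] * (sequence_length - len(label)) for label in labels]
    print(padded)  # [[1, 2, -100], [3, 4, 5], [6, -100, -100]]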
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , )-> str:
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Optional[int] = 13
UpperCAmelCase__ : Union[str, Any] = 7
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : List[str] = 99
UpperCAmelCase__ : Optional[int] = 32
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : List[str] = 37
UpperCAmelCase__ : List[Any] = "gelu"
UpperCAmelCase__ : List[Any] = 0.1
UpperCAmelCase__ : List[Any] = 0.1
UpperCAmelCase__ : List[Any] = 5_12
UpperCAmelCase__ : Tuple = 16
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : Dict = 0.02
UpperCAmelCase__ : Optional[Any] = 3
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : List[Any] = None
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Dict = None
if self.use_input_mask:
UpperCAmelCase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[int] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self )-> Optional[int]:
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Optional[Any] = TFEsmModel(config=__UpperCamelCase )
UpperCAmelCase__ : int = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = [input_ids, input_mask]
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> str:
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : List[str] = TFEsmModel(config=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
UpperCAmelCase__ : Dict = model(__UpperCamelCase )
UpperCAmelCase__ : Dict = [input_ids, input_mask]
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , encoder_hidden_states=__UpperCamelCase )
# Also check the case where encoder outputs are not passed
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : str = TFEsmForMaskedLM(config=__UpperCamelCase )
UpperCAmelCase__ : Any = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Dict = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFEsmForTokenClassification(config=__UpperCamelCase )
UpperCAmelCase__ : int = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[int] = TFEsmModelTester(self )
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFEsmModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip("Protein models do not support embedding resizing." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def lowerCAmelCase__ ( self )-> Tuple:
pass
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = model_class(__UpperCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase__ : int = model.get_bias()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for k, v in name.items():
assert isinstance(__UpperCamelCase , tf.Variable )
else:
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCAmelCase__ : Optional[Any] = model.get_bias()
assert name is None
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Tuple = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCAmelCase__ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )[0]
UpperCAmelCase__ : Optional[Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __UpperCamelCase )
# compare the actual values for a slice.
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCAmelCase__ : Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
UpperCAmelCase__ : str = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
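# --- Illustrative sketch (added; not part of the original test file) ---
# Minimal masked-LM inference with the same public checkpoint the integration
# test above uses; downloading "facebook/esm2_t6_8M_UR50D" requires network access.
if __name__ == "__main__":
    mlm = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    toy_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    logits = mlm(toy_ids)[0]
    print(logits.shape)  # (1, 6, 33), the shape asserted in the test above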
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
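# --- Illustrative sketch (added; not part of the original benchmark module) ---
# The timing-decorator pattern used above, in isolation: the wrapper discards
# the wrapped function's result and returns the elapsed wall-clock time instead.
if __name__ == "__main__":

    def timed(func):
        def wrapper(*args, **kwargs):
            start = timeit.default_timer()
            func(*args, **kwargs)
            return timeit.default_timer() - start

        return wrapper

    @timed
    def busy_loop(n):
        return sum(range(n))

    print(f"busy_loop ran in {busy_loop(1_000_000):.4f}s")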
| 660 | 1 |
"""simple docstring"""
import random
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = num - 1
UpperCAmelCase__ : Dict = 0
while s % 2 == 0:
UpperCAmelCase__ : Tuple = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase__ : Optional[int] = random.randrange(2 , num - 1 )
UpperCAmelCase__ : Optional[Any] = pow(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if v != 1:
UpperCAmelCase__ : Union[str, Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase__ : Optional[int] = i + 1
UpperCAmelCase__ : Optional[int] = (v**2) % num
return True
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if num < 2:
return False
UpperCAmelCase__ : List[str] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCAmelCase )
def a__ ( lowerCAmelCase : int = 1024 ):
'''simple docstring'''
while True:
UpperCAmelCase__ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowerCAmelCase ):
return num
if __name__ == "__main__":
A__ : int = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
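# --- Illustrative sketch (added; not part of the original animation) ---
# A minimal, self-contained scene built from the same Manim primitives used
# above (Rectangle, Text, play). Render it with: manim -ql <file>.py ToySketch
class ToySketch(Scene):
    def construct(self):
        box = Rectangle(height=0.5, width=0.5)
        label = Text("CPU", font_size=24).next_to(box, DOWN)
        self.play(Create(box), Write(label))
        self.wait()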
| 660 | 1 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
A__ : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
return max(metric_fn(lowerCAmelCase , lowerCAmelCase ) for gt in ground_truths )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [line.strip() for line in open(lowerCAmelCase , "r" ).readlines()]
UpperCAmelCase__ : Tuple = []
if args.gold_data_mode == "qa":
UpperCAmelCase__ : int = pd.read_csv(lowerCAmelCase , sep="\t" , header=lowerCAmelCase )
for answer_list in data[1]:
UpperCAmelCase__ : List[str] = ast.literal_eval(lowerCAmelCase )
answers.append(lowerCAmelCase )
else:
UpperCAmelCase__ : Tuple = [line.strip() for line in open(lowerCAmelCase , "r" ).readlines()]
UpperCAmelCase__ : Dict = [[reference] for reference in references]
UpperCAmelCase__ : Optional[int] = 0
for prediction, ground_truths in zip(lowerCAmelCase , lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
fa += metric_max_over_ground_truths(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = 100.0 * em / total
UpperCAmelCase__ : List[Any] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = args.k
UpperCAmelCase__ : Optional[int] = [line.strip() for line in open(lowerCAmelCase , "r" ).readlines()]
UpperCAmelCase__ : List[Any] = [line.strip() for line in open(lowerCAmelCase , "r" ).readlines()]
UpperCAmelCase__ : Optional[int] = 0
for hypo, reference in zip(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = set(hypo.split("\t" )[:k] )
UpperCAmelCase__ : Optional[Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCAmelCase__ : int = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
def strip_title(lowerCAmelCase : List[str] ):
if title.startswith("\"" ):
UpperCAmelCase__ : Any = title[1:]
if title.endswith("\"" ):
UpperCAmelCase__ : Union[str, Any] = title[:-1]
return title
UpperCAmelCase__ : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCAmelCase , return_tensors="pt" , padding=lowerCAmelCase , truncation=lowerCAmelCase , )["input_ids"].to(args.device )
UpperCAmelCase__ : Any = rag_model.rag.question_encoder(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = question_enc_outputs[0]
UpperCAmelCase__ : List[str] = rag_model.retriever(
        lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
UpperCAmelCase__ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCAmelCase__ : List[Any] = []
for docs in all_docs:
UpperCAmelCase__ : Union[str, Any] = [strip_title(lowerCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(lowerCAmelCase ) )
return provenance_strings
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCAmelCase , return_tensors="pt" , padding=lowerCAmelCase , truncation=lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = inputs_dict.input_ids.to(args.device )
UpperCAmelCase__ : Any = inputs_dict.attention_mask.to(args.device )
UpperCAmelCase__ : Tuple = rag_model.generate( # rag_model overwrites generate
lowerCAmelCase , attention_mask=lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCAmelCase__ : Any = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
if args.print_predictions:
for q, a in zip(lowerCAmelCase , lowerCAmelCase ):
logger.info("Q: {} - A: {}".format(lowerCAmelCase , lowerCAmelCase ) )
return answers
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=lowerCAmelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=lowerCAmelCase , choices=["exact", "compressed", "legacy"] , type=lowerCAmelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=lowerCAmelCase , type=lowerCAmelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=lowerCAmelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=lowerCAmelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=lowerCAmelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=lowerCAmelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=lowerCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=lowerCAmelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=lowerCAmelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=lowerCAmelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=lowerCAmelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
UpperCAmelCase__ : List[str] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
if args.model_type is None:
UpperCAmelCase__ : Optional[int] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
UpperCAmelCase__ : List[str] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
UpperCAmelCase__ : Any = args.n_docs
if args.index_name is not None:
UpperCAmelCase__ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCAmelCase__ : Optional[Any] = args.index_path
else:
UpperCAmelCase__ : Optional[int] = BartForConditionalGeneration
UpperCAmelCase__ : Union[str, Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    UpperCAmelCase__ : Any = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(lowerCAmelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
UpperCAmelCase__ : Tuple = RagRetriever.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : int = model_class.from_pretrained(lowerCAmelCase , retriever=lowerCAmelCase , **lowerCAmelCase )
model.retriever.init_retrieval()
else:
UpperCAmelCase__ : int = model_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
UpperCAmelCase__ : str = []
for line in tqdm(lowerCAmelCase ):
questions.append(line.strip() )
if len(lowerCAmelCase ) == args.eval_batch_size:
UpperCAmelCase__ : str = evaluate_batch_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
preds_file.write("\n".join(lowerCAmelCase ) + "\n" )
preds_file.flush()
UpperCAmelCase__ : Optional[int] = []
if len(lowerCAmelCase ) > 0:
UpperCAmelCase__ : Optional[int] = evaluate_batch_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
preds_file.write("\n".join(lowerCAmelCase ) )
preds_file.flush()
score_fn(lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
A__ : List[Any] = get_args()
main(args)
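# --- Illustrative sketch (added; not part of the original script) ---
# The max-over-ground-truths scoring used above, with a toy exact-match metric:
# the prediction is scored against every reference and the best score is kept.
def _toy_exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def _toy_metric_max(prediction: str, ground_truths: list) -> float:
    return max(_toy_exact_match(prediction, gt) for gt in ground_truths)

# _toy_metric_max("Paris", ["Paris", "paris, France"]) -> 1.0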
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
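# --- Illustrative sketch (added; not part of the original script) ---
# What the layer-selection tables above resolve to: distilling a 12-layer teacher
# into a 3-layer student copies teacher layers 0, 6 and 11 (first, middle, last).
# LAYERS_TO_COPY is the name the pick-layers helper above looks the table up under.
if __name__ == "__main__":
    print(LAYERS_TO_COPY[12][3])  # [0, 6, 11]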
| 660 | 1 |
"""simple docstring"""
from PIL import Image
def a__ ( lowerCAmelCase : Image , lowerCAmelCase : float ):
'''simple docstring'''
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
A__ : Union[str, Any] = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
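# --- Illustrative sketch (added; not part of the original module) ---
# The per-pixel mapping above is a plain linear shift (it simplifies to c + level).
# For 8-bit images, PIL is assumed to clamp the lookup-table values into [0, 255].
if __name__ == "__main__":
    demo_level = 100
    for c in (0, 128, 255):
        print(c, "->", 128 + demo_level + (c - 128))  # 100, 228, 355 (355 would clamp to 255)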
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
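# --- Illustrative sketch (added; not part of the original test file) ---
# The tolerance pattern used by the assertions above, in isolation: a slice of
# generated pixels is compared to a reference slice with an absolute tolerance.
if __name__ == "__main__":
    generated_slice = np.full((3, 3), 0.2514)
    reference_slice = np.full((3, 3), 0.2520)
    print(np.abs(generated_slice - reference_slice).max() < 1E-3)  # True: within atol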
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = num_of_nodes
UpperCAmelCase__ : list[list[int]] = []
UpperCAmelCase__ : dict[int, int] = {}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase__ : List[Any] = self.find_component(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase__ : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__UpperCamelCase )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase__ : str = self.find_component(__UpperCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> None:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase__ : Tuple = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = edge
UpperCAmelCase__ : List[Any] = self.m_component[u]
UpperCAmelCase__ : Tuple = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase__ : Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = edge
UpperCAmelCase__ : Any = self.m_component[u]
UpperCAmelCase__ : List[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
UpperCAmelCase__ : Union[str, Any] = [-1] * self.m_num_of_nodes
print(F"The total weight of the minimal spanning tree is: {mst_weight}" )
def a__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
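# --- Illustrative sketch (added; not part of the original module) ---
# Cross-check (Kruskal-style union-find, not Boruvka) that the minimum spanning
# tree of a small triangle has total weight 6 -- the value the class above
# should report for the same edges.
if __name__ == "__main__":
    edges = [(0, 1, 5), (1, 2, 1), (0, 2, 10)]
    parent = list(range(3))

    def find(x: int) -> int:
        while parent[x] != x:
            x = parent[x]
        return x

    total = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            total += w
    print(total)  # 6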
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
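# --- Illustrative sketch (added; not part of the original module) ---
# Exercising the attribute_map aliases defined above via the real upstream
# TableTransformerConfig (requires the transformers package): hidden_size
# resolves to d_model and num_attention_heads to encoder_attention_heads.
if __name__ == "__main__":
    from transformers import TableTransformerConfig

    cfg = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
    print(cfg.hidden_size, cfg.num_attention_heads)  # 256 8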
| 660 | 1 |
"""simple docstring"""
A__ : Union[str, Any] = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
A__ : Union[str, Any] = ["""a""", """b""", """c""", """d""", """e"""]
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = start
# add current to visited
visited.append(lowerCAmelCase )
UpperCAmelCase__ : List[str] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCAmelCase__ : List[Any] = topological_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
UpperCAmelCase__ : Optional[int] = topological_sort(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
A__ : List[str] = topological_sort("""a""", [], [])
print(sort)
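# --- Illustrative sketch (added; not part of the original module) ---
# Checks the property this DFS variant guarantees: nodes are appended after all
# of their children, so for every edge u -> v, v appears before u in the order.
if __name__ == "__main__":
    toy_edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
    toy_order = ["c", "d", "e", "b", "a"]  # the order the demo above prints
    for u, children in toy_edges.items():
        for v in children:
            assert toy_order.index(v) < toy_order.index(u), (u, v)
    print("children precede parents:", toy_order)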
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 1 |
"""simple docstring"""
import numpy as np
def a__ ( lowerCAmelCase : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
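# Illustrative values (for reference): the logistic function above maps
# [-1.0, 0.0, 1.0] to approximately [0.2689, 0.5000, 0.7311].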
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
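# How `number &= number - 1` works (illustrative trace): subtracting 1 turns the
# lowest set bit into 0 and every bit below it into 1, so the AND clears exactly
# one set bit per pass, e.g. 25 = 0b11001 -> 0b11000 -> 0b10000 -> 0 (3 passes).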
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[Any]:
debug_launcher(test_script.main )
def lowerCAmelCase__ ( self )-> List[Any]:
debug_launcher(test_ops.main )
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
import argparse
import os
import re
A__ : int = """src/diffusers"""
# Pattern that looks at the indentation in a line.
A__ : Optional[int] = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : Tuple = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : Union[str, Any] = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : List[Any] = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Any = re.compile(R"""\[([^\]]+)\]""")
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = _re_indent.search(lowerCAmelCase )
return "" if search is None else search.groups()[0]
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : List[str]="" , lowerCAmelCase : str=None , lowerCAmelCase : int=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase ):
index += 1
UpperCAmelCase__ : str = ["\n".join(lines[:index] )]
else:
UpperCAmelCase__ : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase__ : str = [lines[index]]
index += 1
while index < len(lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(lowerCAmelCase ) )
if index < len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : Union[str, Any] = [lines[index + 1]]
index += 1
else:
UpperCAmelCase__ : Optional[Any] = []
else:
blocks.append("\n".join(lowerCAmelCase ) )
UpperCAmelCase__ : Union[str, Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase ) > 0:
blocks.append("\n".join(lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase ):
blocks.append("\n".join(lines[index:] ) )
return blocks
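# Illustrative behaviour: every line at `indent_level` starts a new block, with
# deeper-indented lines staying attached to the block above them; anything
# before `start_prompt` or after `end_prompt` is returned as a single block.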
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
def _inner(lowerCAmelCase : Dict ):
return key(lowerCAmelCase ).lower().replace("_" , "" )
return _inner
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Tuple=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCAmelCase : Dict ):
return x
if key is None:
UpperCAmelCase__ : Dict = noop
# Constants are all uppercase, they go first.
UpperCAmelCase__ : Dict = [obj for obj in objects if key(lowerCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase__ : Tuple = [obj for obj in objects if key(lowerCAmelCase )[0].isupper() and not key(lowerCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase__ : Tuple = [obj for obj in objects if not key(lowerCAmelCase )[0].isupper()]
UpperCAmelCase__ : Union[str, Any] = ignore_underscore(lowerCAmelCase )
return sorted(lowerCAmelCase , key=lowerCAmelCase ) + sorted(lowerCAmelCase , key=lowerCAmelCase ) + sorted(lowerCAmelCase , key=lowerCAmelCase )
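# Example ordering (illustrative, assuming the identity key): given
# ["foo", "Bar", "BAZ"], the grouping above yields ["BAZ", "Bar", "foo"] --
# constants (ALL_CAPS) first, then classes (Capitalized), then functions
# (lowercase), each group sorted ignoring case and underscores.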
def a__ ( lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
    # This inner function sorts imports between [ ].
def _replace(lowerCAmelCase : List[Any] ):
UpperCAmelCase__ : int = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
UpperCAmelCase__ : str = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase__ : Dict = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase )] ) + "]"
UpperCAmelCase__ : List[Any] = import_statement.split("\n" )
if len(lowerCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase__ : str = 2 if lines[1].strip() == "[" else 1
UpperCAmelCase__ : int = [(i, _re_strip_line.search(lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCAmelCase__ : int = sort_objects(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] )
UpperCAmelCase__ : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
UpperCAmelCase__ : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase__ : Tuple = keys[:-1]
UpperCAmelCase__ : Union[str, Any] = get_indent(lines[1] ) + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase )] )
return "\n".join(lowerCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase__ : Any = _re_bracket_content.sub(_replace , lowerCAmelCase )
return import_statement
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Union[str, Any]=True ):
'''simple docstring'''
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase__ : List[str] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase__ : Tuple = split_code_in_indented_blocks(
lowerCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase__ : Optional[int] = main_blocks[block_idx]
UpperCAmelCase__ : Any = block.split("\n" )
# Get to the start of the imports.
UpperCAmelCase__ : Tuple = 0
while line_idx < len(lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase__ : Tuple = len(lowerCAmelCase )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase__ : Dict = "\n".join(block_lines[line_idx:-1] )
UpperCAmelCase__ : Union[str, Any] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
UpperCAmelCase__ : Union[str, Any] = split_code_in_indented_blocks(lowerCAmelCase , indent_level=lowerCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase__ : str = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCAmelCase__ : Optional[int] = [(pattern.search(lowerCAmelCase ).groups()[0] if pattern.search(lowerCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase__ : Optional[int] = [(i, key) for i, key in enumerate(lowerCAmelCase ) if key is not None]
UpperCAmelCase__ : str = [x[0] for x in sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Union[str, Any] = []
for i in range(len(lowerCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase__ : int = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase__ : Dict = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowerCAmelCase , "w" ) as f:
f.write("\n".join(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase__ : int = []
for root, _, files in os.walk(lowerCAmelCase ):
if "__init__.py" in files:
UpperCAmelCase__ : Tuple = sort_imports(os.path.join(lowerCAmelCase , "__init__.py" ) , check_only=lowerCAmelCase )
if result:
UpperCAmelCase__ : List[Any] = [os.path.join(lowerCAmelCase , "__init__.py" )]
if len(lowerCAmelCase ) > 0:
raise ValueError(F"Would overwrite {len(lowerCAmelCase )} files, run `make style`." )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
A__ : List[Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
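# Worked example (illustrative): for points [1, 1] and [2, 2] the distance is
# |1 - 2| + |1 - 2| = 2.0.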
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
A__ : Tuple = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
A__ : Optional[Any] = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
A__ : Union[str, Any] = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : bool , lowerCAmelCase : Optional[Dict[int, int]] = None , lowerCAmelCase : bool = False , ):
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
UpperCAmelCase__ : Any = new_id
# turn into Numpy arrays
UpperCAmelCase__ : Optional[int] = np.array(lowerCAmelCase )
UpperCAmelCase__ : Tuple = np.array(lowerCAmelCase )
if reduce_labels:
UpperCAmelCase__ : Any = 255
UpperCAmelCase__ : str = label - 1
UpperCAmelCase__ : str = 255
UpperCAmelCase__ : Optional[int] = label != ignore_index
UpperCAmelCase__ : str = np.not_equal(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = pred_label[mask]
UpperCAmelCase__ : Union[str, Any] = np.array(lowerCAmelCase )[mask]
UpperCAmelCase__ : Optional[Any] = pred_label[pred_label == label]
UpperCAmelCase__ : int = np.histogram(lowerCAmelCase , bins=lowerCAmelCase , range=(0, num_labels - 1) )[0]
UpperCAmelCase__ : Union[str, Any] = np.histogram(lowerCAmelCase , bins=lowerCAmelCase , range=(0, num_labels - 1) )[0]
UpperCAmelCase__ : List[Any] = np.histogram(lowerCAmelCase , bins=lowerCAmelCase , range=(0, num_labels - 1) )[0]
UpperCAmelCase__ : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
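# Note: the union follows inclusion-exclusion -- per class,
# |pred ∪ label| = |pred| + |label| - |pred ∩ label|, computed bin-wise from
# the three histograms above.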
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : bool , lowerCAmelCase : Optional[Dict[int, int]] = None , lowerCAmelCase : bool = False , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
UpperCAmelCase__ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
UpperCAmelCase__ : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
UpperCAmelCase__ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = intersect_and_union(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : bool , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[Dict[int, int]] = None , lowerCAmelCase : bool = False , ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = total_intersect_and_union(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# compute metrics
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Dict = total_area_intersect.sum() / total_area_label.sum()
UpperCAmelCase__ : Any = total_area_intersect / total_area_union
UpperCAmelCase__ : Optional[int] = total_area_intersect / total_area_label
UpperCAmelCase__ : Optional[int] = np.nanmean(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = np.nanmean(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = all_acc
UpperCAmelCase__ : Tuple = iou
UpperCAmelCase__ : Dict = acc
if nan_to_num is not None:
UpperCAmelCase__ : Tuple = {metric: np.nan_to_num(lowerCAmelCase , nan=lowerCAmelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , )-> Dict:
UpperCAmelCase__ : List[str] = mean_iou(
results=__UpperCamelCase , gt_seg_maps=__UpperCamelCase , num_labels=__UpperCamelCase , ignore_index=__UpperCamelCase , nan_to_num=__UpperCamelCase , label_map=__UpperCamelCase , reduce_labels=__UpperCamelCase , )
return iou_result
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
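# Why stepping over 6k +/- 1 suffices (sketch): every integer is congruent to
# one of 6k, 6k+1, 6k+2, 6k+3, 6k+4, 6k+5; of these, 6k, 6k+2 and 6k+4 are
# even and 6k+3 is divisible by 3, so after the explicit 2/3 checks only the
# 6k +/- 1 candidates (i and i + 2 in the loop) need to be tested.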
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A__ : Optional[int] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
A__ : List[str] = parser.parse_args()
A__ : List[Any] = """cpu"""
A__ : Union[str, Any] = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
A__ : str = """path-to-your-trained-model"""
A__ : List[str] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A__ : List[str] = pipe.to(device)
# to channels last
A__ : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
A__ : int = pipe.vae.to(memory_format=torch.channels_last)
A__ : Optional[int] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A__ : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A__ : Any = torch.randn(2, 4, 64, 64)
A__ : Dict = torch.rand(1) * 999
A__ : str = torch.randn(2, 77, 768)
A__ : str = (sample, timestep, encoder_hidden_status)
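# The tuple mirrors the UNet forward signature (latents, timestep, encoder
# hidden states) so that ipex can trace the model with a representative input.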
try:
A__ : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
A__ : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
A__ : int = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
A__ : Any = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
A__ : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
A__ : List[str] = 666
A__ : Union[str, Any] = torch.Generator(device).manual_seed(seed)
A__ : str = {"""generator""": generator}
if args.steps is not None:
A__ : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
A__ : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
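        # e.g. with image_size=30 and patch_size=2 there are 225 patches; a
        # mask_ratio of 0.6 leaves ceil(0.4 * 226) = 91 visible tokens (CLS included).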
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Any = logging.get_logger(__name__)
A__ : List[Any] = {"""vocab_file""": """spiece.model"""}
A__ : Optional[Any] = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
A__ : int = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
UpperCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase__ : Any = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase__ : List[Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase__ : Any = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase__ : Union[str, Any] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase__ : int = unk_token if pad_token is None else pad_token
UpperCAmelCase__ : List[Any] = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase__ : List[str] = "<pad>" if pad_token is None else pad_token
UpperCAmelCase__ : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
UpperCAmelCase__ : Optional[int] = do_lower_case
UpperCAmelCase__ : int = remove_space
UpperCAmelCase__ : Union[str, Any] = keep_accents
UpperCAmelCase__ : Any = vocab_file
UpperCAmelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
# Used for whitespace normalization in input texts
        # fmt: off
UpperCAmelCase__ : Optional[Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase__ : int = re.compile(
F"[{''.join(map(__UpperCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]" )
def __getstate__( self )-> List[str]:
UpperCAmelCase__ : Optional[int] = self.__dict__.copy()
UpperCAmelCase__ : Dict = None
return state
def __setstate__( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase__ ( self )-> int:
return len(self.sp_model )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : Optional[Any] = self.non_printing_characters_re.sub("" , __UpperCamelCase )
# Normalize whitespaces
UpperCAmelCase__ : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase__ : int = unicodedata.normalize("NFC" , __UpperCamelCase )
return text
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
UpperCAmelCase__ : List[Any] = self.preprocess_text(__UpperCamelCase )
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return self.sp_model.PieceToId(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return self.sp_model.IdToPiece(__UpperCamelCase )
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase )-> str:
return out_string
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[Any] = ""
UpperCAmelCase__ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCamelCase ) + token
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(__UpperCamelCase )
UpperCAmelCase__ : List[str] = False
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string
def lowerCAmelCase__ ( self )-> Dict[str, int]:
UpperCAmelCase__ : str = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , "wb" ) as fi:
UpperCAmelCase__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[Any] = self.preprocess_text(__UpperCamelCase )
UpperCAmelCase__ : int = self.sp_model.encode(__UpperCamelCase )
else:
UpperCAmelCase__ : Optional[Any] = [self.preprocess_text(__UpperCamelCase ) for t in text]
UpperCAmelCase__ : Tuple = self.sp_model.encode(__UpperCamelCase )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase__ : Union[str, Any] = torch.tensor(__UpperCamelCase )
return token_ids
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return self.sp_model.decode(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[int]:
UpperCAmelCase__ : Optional[int] = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
UpperCAmelCase__ : Optional[int] = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(__UpperCamelCase ) + F"{self.bos_token}Bot:"
)
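        # Illustrative prompt shape (with the default special tokens):
        # "<|endoftext|><s>User: Hi<s>Bot: Hello<s>User: Thanks<s>Bot:"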
return self.encode(text=__UpperCamelCase )
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
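        # i.e. the DDPM posterior variance of Eq. (7):
        #   beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t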
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
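        # i.e. mu_tilde_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
        #                 + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t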
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
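# A minimal standalone sketch (illustrative, not part of the scheduler API) of the two
# formulas the step above implements: "predicted x_0" from an epsilon prediction
# (Eq. 15 of the DDPM paper) and the posterior mean coefficients (Eq. 7).
def _ddpm_posterior_mean_sketch(sample, eps_pred, alpha_prod_t, alpha_prod_t_prev, alpha_t, beta_t):
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # Eq. 15: x_0 = (x_t - sqrt(1 - a_bar_t) * eps) / sqrt(a_bar_t)
    pred_x0 = (sample - beta_prod_t ** 0.5 * eps_pred) / alpha_prod_t ** 0.5
    # Eq. 7: mu_t = coef_x0 * x_0 + coef_xt * x_t
    coef_x0 = (alpha_prod_t_prev ** 0.5 * beta_t) / beta_prod_t
    coef_xt = alpha_t ** 0.5 * beta_prod_t_prev / beta_prod_t
    return coef_x0 * pred_x0 + coef_xt * sample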
| 660 | 1 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ):
    """Greedily concatenate adjacent (src, tgt) pairs while both stay under max_tokens."""
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
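# A self-contained sketch of the same greedy packing idea, using whitespace token
# counts instead of a HuggingFace tokenizer (the mock budget is an assumption,
# purely to illustrate pack_examples' behaviour):
def _greedy_pack_sketch(lines, max_tokens=6):
    packed, cur = [], lines[0]
    for line in lines[1:]:
        cand = cur + " " + line
        if len(cand.split()) > max_tokens:  # cannot fit: finalize the current pack
            packed.append(cur)
            cur = line
        else:  # fits: keep concatenating
            cur = cand
    packed.append(cur)
    return packed

# _greedy_pack_sketch(["a b", "c d e", "f", "g h i j k l m"]) ->
# ["a b c d e f", "g h i j k l m"]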
def pack_data_dir( tok , data_dir : Path , max_tokens , save_path ):
    """Pack the train split in data_dir and copy val/test unchanged to save_path."""
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"packed {split} split from {len(src_docs )} examples -> {len(packed_src )}." )
        Path(save_path / F"{split}.source" ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / F"{split}.target" ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        shutil.copyfile(src_path , save_path / F"{split}.source" )
        shutil.copyfile(tgt_path , save_path / F"{split}.target" )
def packer_cli():
    """Command-line entry point: tokenizer name, token budget, data and save paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=int , default=128 )
    parser.add_argument("--data_dir" , type=str )
    parser.add_argument("--save_path" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"name": str(d ), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip("/" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out )
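# A standalone sketch of the directory-cache trick used by _get_dirs above: every
# parent of a flat repo file path becomes a synthetic "directory" entry.
def _build_dir_cache_sketch(filenames):
    cache = {}
    for name in filenames:
        cache[name] = {"name": name, "size": None, "type": "file"}
        for d in list(PurePosixPath(name).parents)[:-1]:  # [:-1] drops the root "."
            cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
    return cache

# _build_dir_cache_sketch(["data/train/part-0.txt"]) adds "data" and "data/train"
# as directory entries alongside the file itself.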
| 660 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
A__ : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
A__ : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
A__ : str = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=1 , __UpperCamelCase="binary" , __UpperCamelCase=None , __UpperCamelCase="warn" , )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = recall_score(
__UpperCamelCase , __UpperCamelCase , labels=__UpperCamelCase , pos_label=__UpperCamelCase , average=__UpperCamelCase , sample_weight=__UpperCamelCase , zero_division=__UpperCamelCase , )
return {"recall": float(__UpperCamelCase ) if score.size == 1 else score}
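# A quick sanity check (illustrative) that the formula Recall = TP / (TP + FN)
# matches sklearn on Example 1 from the docstring above.
def _recall_by_hand_demo():
    refs = [0, 0, 1, 1, 1]
    preds = [0, 1, 0, 1, 1]
    tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)  # 2
    fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)  # 1
    assert abs(tp / (tp + fn) - recall_score(refs, preds)) < 1e-12  # both 2/3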
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def make_batched( videos ):
    """Normalise a single image, a video (list of frames), or a batch of videos to a list of videos."""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}" )
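# Behaviour sketch for make_batched (a NumPy array is a valid image here): a bare
# frame becomes one single-frame video, a list of frames becomes one video, and an
# already-batched list of videos passes through unchanged.
def _make_batched_demo():
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame           # image -> [[image]]
    assert make_batched([frame, frame])[0][1] is frame  # video -> [video]
    assert make_batched([[frame]])[0][0] is frame       # batch -> unchanged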
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow MobileBERT checkpoint to a PyTorch model."""
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> a__(25)  # 0b11001
    3
    >>> a__(0)
    0
    """
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A__ : Dict = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'albert'
def __init__( self , __UpperCamelCase=3_00_00 , __UpperCamelCase=1_28 , __UpperCamelCase=40_96 , __UpperCamelCase=12 , __UpperCamelCase=1 , __UpperCamelCase=64 , __UpperCamelCase=1_63_84 , __UpperCamelCase=1 , __UpperCamelCase="gelu_new" , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=5_12 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=0.1 , __UpperCamelCase="absolute" , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase=3 , **__UpperCamelCase , )-> int:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
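# Why embedding_size (128) is decoupled from hidden_size above: ALBERT factorises
# the V x H embedding matrix into V x E plus E x H. Parameter count with the
# defaults in this config (illustrative arithmetic only):
def _albert_embedding_savings():
    V, E, H = 30_000, 128, 4_096
    unfactorised = V * H         # 122_880_000 parameters
    factorised = V * E + E * H   # 4_364_288 parameters
    return unfactorised, factorised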
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode : bool , use_xla : bool ):
    """Wrap a function to run either eagerly or as an (optionally XLA-compiled) tf.function."""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
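# Usage sketch for the factory above (assumes TensorFlow is installed): wrap a
# callable for graph mode with XLA disabled, mirroring how the benchmark applies
# it via `@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)`.
def _graph_mode_demo():
    @run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
    def add_one(x):
        return x + 1

    return add_one(tf.constant([1, 2]))  # runs inside a compiled tf.function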
def random_input_ids( batch_size : int , sequence_length : int , vocab_size : int ):
    """Build a random int32 tensor of token ids with shape (batch_size, sequence_length)."""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
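# Shape/dtype sketch for the helper above (illustrative; needs TensorFlow):
def _random_input_ids_demo():
    ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=30_522)
    assert ids.shape == (2, 8) and ids.dtype == tf.int32
    return ids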
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
    """Return started_at/completed_at plus the duration in minutes for one job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
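# Quick check with a fabricated job payload (timestamps are illustrative):
def _duration_demo():
    job = {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:12:30Z"}
    info = extract_time_from_single_job(job)
    assert info["duration"] == 12  # 12.5 min; round() banker's-rounds to 12
    return info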
def get_job_time( workflow_run_id , token=None ):
    """Fetch all job runtimes for a GitHub Actions workflow run (handles pagination)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
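# Pagination sketch: the API caps at 100 jobs per page, so a run with
# total_count = 250 needs ceil((250 - 100) / 100) = 2 extra requests
# (page=2 and page=3) after the first one.
def _extra_pages(total_count, per_page=100):
    return math.ceil((total_count - per_page) / per_page)

# _extra_pages(250) == 2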
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 660 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None )-> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
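# The special-token layout produced by the two methods above, mirrored with token
# strings instead of ids (illustrative):
def _special_token_layout(tokens_a, tokens_b=None):
    if tokens_b is None:
        return ["<s>"] + tokens_a + ["</s>"]
    return ["<s>"] + tokens_a + ["</s>", "</s>"] + tokens_b + ["</s>"]

# _special_token_layout(["j'", "aime"], ["le", "camembert"]) ->
# ['<s>', "j'", 'aime', '</s>', '</s>', 'le', 'camembert', '</s>']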
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
    @register_to_config
    def __init__( self , embedding_dim = 7_68 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
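# scale/unscale above are exact inverses (up to float error). A NumPy sketch of
# the same round trip (illustrative, standalone):
def _roundtrip_demo():
    import numpy as np
    embeds = np.random.randn(2, 4)
    mean, std = embeds.mean(), embeds.std()
    scaled = (embeds - mean) * 1.0 / std
    restored = scaled * std + mean
    assert np.allclose(restored, embeds)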
| 660 | 1 |
"""simple docstring"""
def solution( pence : int = 200 ) -> int:
    """Count the ways UK coins can sum to `pence` (Project Euler problem 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
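# The update above is the classic unbounded coin-change recurrence
# ways[i] += ways[i - coin], processing one denomination at a time so each
# multiset of coins is counted exactly once. Tiny illustration:
def _count_ways(pence, coins):
    ways = [0] * (pence + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways

# _count_ways(5, [1, 2, 5]) -> [1, 1, 2, 2, 3, 4]: four ways to make 5p.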
if __name__ == "__main__":
assert solution(200) == 73_682
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    """Convert an OpenAI GPT-2 TensorFlow checkpoint to a PyTorch checkpoint."""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
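# Example invocation (script name and paths are placeholders):
# python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path /path/to/tf_ckpt \
#     --pytorch_dump_folder_path /path/to/output \
#     --gpt2_config_file /path/to/config.json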
| 660 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
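# Shape/range sketch for floats_list (illustrative): values are uniform in [0, scale).
def _floats_list_demo():
    batch = floats_list((2, 3), scale=2.0)
    assert len(batch) == 2 and len(batch[0]) == 3
    assert all(0.0 <= v < 2.0 for row in batch for v in row)
    return batch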
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=4_00 , __UpperCamelCase=20_00 , __UpperCamelCase=1 , __UpperCamelCase=0.0 , __UpperCamelCase=1_60_00 , __UpperCamelCase=True , __UpperCamelCase=80 , __UpperCamelCase=16 , __UpperCamelCase=64 , __UpperCamelCase="hann_window" , __UpperCamelCase=80 , __UpperCamelCase=76_00 , __UpperCamelCase=1E-10 , __UpperCamelCase=True , )-> Tuple:
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Any = min_seq_length
UpperCAmelCase__ : Optional[Any] = max_seq_length
UpperCAmelCase__ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase__ : List[str] = feature_size
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : Optional[Any] = sampling_rate
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : Union[str, Any] = num_mel_bins
UpperCAmelCase__ : Any = hop_length
UpperCAmelCase__ : Optional[int] = win_length
UpperCAmelCase__ : Tuple = win_function
UpperCAmelCase__ : Dict = fmin
UpperCAmelCase__ : Dict = fmax
UpperCAmelCase__ : str = mel_floor
UpperCAmelCase__ : Any = return_attention_mask
def lowerCAmelCase__ ( self )-> int:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase__ ( self , __UpperCamelCase=False , __UpperCamelCase=False )-> Optional[int]:
def _flatten(__UpperCamelCase ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase__ : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase__ : Optional[Any] = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase__ ( self , __UpperCamelCase=False , __UpperCamelCase=False )-> List[str]:
if equal_length:
UpperCAmelCase__ : List[str] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase__ : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase__ : Dict = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = SpeechTaFeatureExtractor
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
self.assertTrue(np.all(np.mean(__UpperCamelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase__ ( self )-> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase__ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : Any = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase__ : Optional[int] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
# Test batched
UpperCAmelCase__ : Tuple = feat_extract(__UpperCamelCase , return_tensors="np" ).input_values
UpperCAmelCase__ : Union[str, Any] = feat_extract(__UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : Dict = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase__ : Dict = [None, 16_00, None]
for max_length, padding in zip(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = feat_extract(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors="np" )
UpperCAmelCase__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Dict = range(8_00 , 14_00 , 2_00 )
UpperCAmelCase__ : str = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase__ : List[Any] = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase__ : Dict = [None, 16_00, None]
for max_length, padding in zip(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Any = feat_extract(__UpperCamelCase , max_length=__UpperCamelCase , padding=__UpperCamelCase )
UpperCAmelCase__ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : Union[str, Any] = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=10_00 , padding="max_length" , return_tensors="np" )
UpperCAmelCase__ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : Any = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=10_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
UpperCAmelCase__ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : Optional[Any] = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=20_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Union[str, Any] = np.random.rand(1_00 ).astype(np.floataa )
UpperCAmelCase__ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase__ : Any = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase__ : Optional[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase__ ( self )-> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase__ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase__ : str = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase__ : Optional[int] = feature_extractor(audio_target=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase__ : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase__ : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
# Test batched
UpperCAmelCase__ : str = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_values
UpperCAmelCase__ : Union[str, Any] = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_b in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase__ : int = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCAmelCase__ : Union[str, Any] = np.asarray(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_values
UpperCAmelCase__ : List[str] = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_b in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Optional[int] = feat_extract.model_input_names[0]
UpperCAmelCase__ : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(x ) == len(y ) for x, y in zip(__UpperCamelCase , processed_features[input_name] ) ) )
UpperCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : List[str] = feat_extract.model_input_names[0]
UpperCAmelCase__ : str = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase__ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase__ : Dict = feat_extract.model_input_names[0]
UpperCAmelCase__ : Tuple = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : Optional[int] = feat_extract.num_mel_bins # hack!
UpperCAmelCase__ : List[Any] = feat_extract.pad(__UpperCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ : Optional[int] = feat_extract.pad(__UpperCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : List[str] = self.feat_extract_dict
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase__ : Any = [len(x ) for x in speech_inputs]
UpperCAmelCase__ : List[Any] = feat_extract.model_input_names[0]
UpperCAmelCase__ : int = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
UpperCAmelCase__ : Dict = feat_extract.pad(__UpperCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , __UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : int = self.feat_extract_dict
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : List[Any] = self.feature_extraction_class(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase__ : int = [len(x ) for x in speech_inputs]
UpperCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
UpperCAmelCase__ : Any = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ : int = min(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = feat_extract.num_mel_bins # hack!
UpperCAmelCase__ : int = feat_extract.pad(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , __UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
from datasets import load_dataset
UpperCAmelCase__ : List[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase__ : Optional[Any] = ds.sort("id" ).select(range(__UpperCamelCase ) )[:__UpperCamelCase]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self )-> List[Any]:
# fmt: off
UpperCAmelCase__ : List[Any] = torch.tensor(
[2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03,
3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03,
2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04,
4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03,
7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04,
4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] )
# fmt: on
UpperCAmelCase__ : Dict = self._load_datasamples(1 )
UpperCAmelCase__ : str = SpeechTaFeatureExtractor()
UpperCAmelCase__ : Union[str, Any] = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , __UpperCamelCase , atol=1E-6 ) )
def lowerCAmelCase__ ( self )-> List[str]:
# fmt: off
UpperCAmelCase__ : int = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
UpperCAmelCase__ : Tuple = self._load_datasamples(1 )
UpperCAmelCase__ : Union[str, Any] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : List[Any] = feature_extractor(audio_target=__UpperCamelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __UpperCamelCase , atol=1E-4 ) )
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
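# Load the fine-tuned DialoGPT state dict, swap the old `lm_head.decoder.weight` key for `lm_head.weight`, and save under WEIGHTS_NAME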
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ : str = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ["""LayoutLMv3FeatureExtractor"""]
A__ : Dict = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
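# Sieve of Eratosthenes: cross out every multiple of each prime up to sqrt(max_number)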
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
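# Two-pointer count: for each left prime, primes[left..right] are exactly the partners keeping the product below max_number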
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : int = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'encodec'
def __init__( self , __UpperCamelCase=[1.5, 3.0, 6.0, 12.0, 24.0] , __UpperCamelCase=2_40_00 , __UpperCamelCase=1 , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=1_28 , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=[8, 5, 4, 2] , __UpperCamelCase="weight_norm" , __UpperCamelCase=7 , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase="reflect" , __UpperCamelCase=2 , __UpperCamelCase=2 , __UpperCamelCase=1.0 , __UpperCamelCase=10_24 , __UpperCamelCase=None , __UpperCamelCase=True , **__UpperCamelCase , )-> str:
UpperCAmelCase__ : Any = target_bandwidths
UpperCAmelCase__ : Tuple = sampling_rate
UpperCAmelCase__ : Union[str, Any] = audio_channels
UpperCAmelCase__ : List[Any] = normalize
UpperCAmelCase__ : Optional[Any] = chunk_length_s
UpperCAmelCase__ : int = overlap
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Dict = num_filters
UpperCAmelCase__ : Any = num_residual_layers
UpperCAmelCase__ : Tuple = upsampling_ratios
UpperCAmelCase__ : Dict = norm_type
UpperCAmelCase__ : Optional[int] = kernel_size
UpperCAmelCase__ : Optional[int] = last_kernel_size
UpperCAmelCase__ : str = residual_kernel_size
UpperCAmelCase__ : Union[str, Any] = dilation_growth_rate
UpperCAmelCase__ : str = use_causal_conv
UpperCAmelCase__ : Optional[Any] = pad_mode
UpperCAmelCase__ : Any = compress
UpperCAmelCase__ : Any = num_lstm_layers
UpperCAmelCase__ : Any = trim_right_ratio
UpperCAmelCase__ : List[str] = codebook_size
UpperCAmelCase__ : Optional[int] = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase__ : Optional[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCAmelCase__ ( self )-> int:
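# frame rate = ceil(sampling_rate / hop_length), where hop_length is the product of the upsampling ratios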
UpperCAmelCase__ : Union[str, Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCAmelCase__ ( self )-> int:
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
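# Pad or truncate every example to sequence_length, filling the remainder with padding_value on the given padding_side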
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
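# ASCII symbol ranges and any character whose Unicode category starts with "P" count as punctuation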
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : List[Any] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'openai-gpt'
_A = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCamelCase=4_04_78 , __UpperCamelCase=5_12 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=1E-5 , __UpperCamelCase=0.02 , __UpperCamelCase="cls_index" , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Optional[Any] = n_positions
UpperCAmelCase__ : Union[str, Any] = n_embd
UpperCAmelCase__ : Union[str, Any] = n_layer
UpperCAmelCase__ : Tuple = n_head
UpperCAmelCase__ : List[Any] = afn
UpperCAmelCase__ : Tuple = resid_pdrop
UpperCAmelCase__ : Optional[int] = embd_pdrop
UpperCAmelCase__ : Optional[Any] = attn_pdrop
UpperCAmelCase__ : Dict = layer_norm_epsilon
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Optional[Any] = summary_type
UpperCAmelCase__ : Optional[Any] = summary_use_proj
UpperCAmelCase__ : List[Any] = summary_activation
UpperCAmelCase__ : Any = summary_first_dropout
UpperCAmelCase__ : str = summary_proj_to_labels
super().__init__(**__UpperCamelCase )
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
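# Decorator: time a single call with timeit's default timer and return the elapsed seconds instead of the result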
def wrapper(*args : Any , **kwargs : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*args , **kwargs )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
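# Build num_examples random records that conform to the given datasets.Features schema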
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if num_final_examples != num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list ):
'''simple docstring'''
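# Heap's algorithm: produce every permutation of the input by swapping one pair of elements per step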
if len(lowerCAmelCase ) <= 1:
return [tuple(lowerCAmelCase )]
UpperCAmelCase__ : str = []
def generate(k : int , arr : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCAmelCase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = arr[k - 1], arr[i]
else: # k is odd
UpperCAmelCase__ , UpperCAmelCase__ : Any = arr[k - 1], arr[0]
generate(k - 1 , lowerCAmelCase )
generate(len(lowerCAmelCase ) , lowerCAmelCase )
return res
if __name__ == "__main__":
A__ : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip()
A__ : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
A__ : Dict = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
A__ : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
A__ : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
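# Copy the selected teacher layers into the destination ModuleList by transferring their state dict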
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
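# Look up the hardcoded teacher->student layer mapping; fall back to the first n_student layers with a warning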
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
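# Decide which teacher layers supervise the student's layers during intermediate-layer distillation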
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
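# A number is a palindrome iff it equals its own arithmetic digit reversal (negatives never qualify)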
if num < 0:
return False
UpperCAmelCase__ : int = num
UpperCAmelCase__ : int = 0
while num > 0:
UpperCAmelCase__ : Any = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=2 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=36 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=6 , __UpperCamelCase=6 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , __UpperCamelCase=10_00 , )-> Any:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : Optional[int] = num_channels
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[int] = use_input_mask
UpperCAmelCase__ : Optional[int] = use_token_type_ids
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Tuple = num_choices
UpperCAmelCase__ : Optional[Any] = scope
UpperCAmelCase__ : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : Any = text_seq_length
UpperCAmelCase__ : Union[str, Any] = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Union[str, Any] = self.text_seq_length + self.image_seq_length
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : Union[str, Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : Dict = bbox[i, j, 3]
UpperCAmelCase__ : Optional[Any] = bbox[i, j, 1]
UpperCAmelCase__ : Union[str, Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Any = bbox[i, j, 2]
UpperCAmelCase__ : Optional[int] = bbox[i, j, 0]
UpperCAmelCase__ : Optional[Any] = tmp_coordinate
UpperCAmelCase__ : List[str] = tf.constant(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[int] = TFLayoutLMvaModel(config=__UpperCamelCase )
# text + image
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , training=__UpperCamelCase , )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : Any = model({"pixel_values": pixel_values} , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Dict = self.num_labels
UpperCAmelCase__ : Any = TFLayoutLMvaForSequenceClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : int = self.num_labels
UpperCAmelCase__ : str = TFLayoutLMvaForTokenClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : List[str] = TFLayoutLMvaForQuestionAnswering(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Tuple = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_A = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
return True
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False )-> dict:
UpperCAmelCase__ : Optional[int] = copy.deepcopy(__UpperCamelCase )
if model_class in get_values(__UpperCamelCase ):
UpperCAmelCase__ : List[Any] = {
k: tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__UpperCamelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCamelCase ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
UpperCAmelCase__ : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCAmelCase__ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
UpperCAmelCase__ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
UpperCAmelCase__ : Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : str = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> str:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(__UpperCamelCase )
if getattr(__UpperCamelCase , "hf_compute_loss" , __UpperCamelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCAmelCase__ : Dict = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCamelCase )[0]
]
UpperCAmelCase__ : Optional[int] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : str = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = prepared_for_class.pop("input_ids" )
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
UpperCAmelCase__ : str = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : List[str] = -1_00
UpperCAmelCase__ : Dict = tf.convert_to_tensor(__UpperCamelCase )
UpperCAmelCase__ : Dict = model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : str = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : int = self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : str = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : Optional[int] = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Tuple = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : str = {0: "input_ids"}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Dict = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : List[Any] = prepared_for_class[value]
UpperCAmelCase__ : Union[str, Any] = tuple(__UpperCamelCase )
# Send to model
UpperCAmelCase__ : str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCAmelCase__ ( self )-> str:
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : str = type
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    def test_for_question_answering(self ):
        ( config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels , ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
    def test_model_from_pretrained(self ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head(self ):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="tf" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
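        # bbox provides one (x0, y0, x1, y1) box per input token; expand_dims batches it to shape (1, 2, 4).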
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self )-> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self )-> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs(self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
    @property
    def atol_for_validation(self )-> float:
        return 1E-5
    @property
    def default_onnx_opset(self )-> int:
        return 12
| 660 | 1 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : Tuple = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
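        # `feed_forward_proj` is either "<act_fn>" or "gated-<act_fn>"; split it into the activation name and a flag for the gated variant.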
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self ):
        return self.d_model
    @property
    def num_attention_heads(self ):
        return self.num_heads
    @property
    def num_hidden_layers(self ):
        return self.num_layers
class UMTaOnnxConfig(OnnxSeqaSeqConfigWithPast ):
    '''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self )-> Mapping[str, Mapping[int, str]]:
        common_inputs = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase__ : Optional[int] = "past_encoder_sequence + sequence"
UpperCAmelCase__ : List[str] = {0: "batch"}
UpperCAmelCase__ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase__ : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase__ : Dict = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
return common_inputs
@property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self )-> int:
        return 13
    @property
    def atol_for_validation(self )-> float:
        return 5E-4
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def eval_data_dir( data_dir , save_dir : str , model_name : str , bs : int = 8 , max_source_length : int = 1024 , type_path="val" , n_obs=None , fpaa=False , task="summarization" , local_rank=None , num_return_sequences : int = 1 , dataset_kwargs : Dict = None , prefix="" , **generate_kwargs , ):
'''simple docstring'''
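    # Each distributed rank generates predictions for its shard of the dataset and writes them to
    # rank_<local_rank>_output.json under save_dir; rank 0 gathers the shards afterwards.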
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    ds = SeqaSeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate():
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json" )
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F"{args.type_path}.target" ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x["pred"] for x in records]
    return preds
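# Rank 0 polls save_dir until every rank's rank_*.json shard both exists and parses; a JSONDecodeError
# means a shard is still being written, so polling continues until the timeout elapses.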
def gather_results_from_each_node( num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    '''simple docstring'''
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 1 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x : int ):
'''simple docstring'''
return x + 2
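# `add_two` is handed to `evaluate` as a tool in the tests below; the interpreter may only call
# functions that appear in that tool mapping.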
class PythonInterpreterTester(unittest.TestCase ):
'''simple docstring'''
    def test_evaluate_assign(self ):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 5, "y": 5} )
    def test_evaluate_call(self ):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self ):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
    def test_evaluate_dict(self ):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertDictEqual(result , {"x": 3, "y": 5} )
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_expression(self ):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
    def test_evaluate_f_string(self ):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"x": 3, "text": "This is x: 3."} )
    def test_evaluate_if(self ):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"x": 3, "y": 2} )
        state = {"x": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 8, "y": 5} )
    def test_evaluate_list(self ):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
    def test_evaluate_name(self ):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3, "y": 3} )
    def test_evaluate_subscript(self ):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_for(self ):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code , {"range": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"x": 2, "i": 2} )
| 660 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number : int ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
    result = 0
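    # Brian Kernighan's trick: number & (number - 1) clears the lowest set bit,
    # so the loop runs once per set bit rather than once per bit position.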
while number:
number &= number - 1
result += 1
return result
def get_set_bits_count_using_modulo_operator( number : int ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
    result = 0
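    # Naive approach: test the least-significant bit with % 2, then shift right,
    # iterating once per bit position of the input.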
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number : int ) -> None:
        setup = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number ) = }" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 1 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def copy_layers( src_layers : nn.ModuleList , dest_layers : nn.ModuleList , layers_to_copy : List[int] ) -> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"{len(dest_layers )} != {len(layers_to_copy )}"
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
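# Pick which teacher layers a student with n_student layers should copy: use the hand-tuned
# mappings above when one exists, otherwise fall back to the first n_student layers.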
def pick_layers_to_copy( n_student : int , n_teacher : int ):
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                F" {n_student}" )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student : int , n_teacher : int ) -> list:
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers( teacher : Union[str, PreTrainedModel] , save_path : Union[str, Path] = "student" , e : Union[int, None] = None , d : Union[int, None] = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , ):
    '''simple docstring'''
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher , str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path ) # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher , PreTrainedModel ), F"teacher must be a model or string got type {type(teacher )}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e , teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
            teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
# Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
        e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
        student.save_pretrained(save_path )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e , teacher_e )
if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d , teacher_d )
try:
        if hasattr(
            teacher , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
    student.config.init_metadata = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
    student.save_pretrained(save_path )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
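    # The same input should classify identically whether the tool runs locally or remotely,
    # and whether it is called with positional or keyword arguments.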
    def setUp(self ):
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )
    def test_exact_match_arg(self ):
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_arg_remote(self ):
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg(self ):
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg_remote(self ):
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
| 660 | 1 |