code (stringlengths 82–54.1k) | code_codestyle (int64 0–699) | style_context (stringlengths 111–35.6k) | style_context_codestyle (int64 0–699) | label (int64 0–1)
---|---|---|---|---|
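# Fast (Rust tokenizers-backed) RemBERT tokenizer: builds [CLS] ... [SEP] model inputs,
# special-token masks and token type ids, and can copy its SentencePiece vocab file on save.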
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 256,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[str] = RemBertTokenizer
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : int="[SEP]" , SCREAMING_SNAKE_CASE__ : Dict="<unk>" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[MASK]" , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : Optional[Any] = do_lower_case
__a : int = remove_space
__a : List[Any] = keep_accents
__a : Union[str, Any] = vocab_file
__a : List[Any] = False if not self.vocab_file else True
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : str = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Any = [self.sep_token_id]
__a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE__ ) )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 |
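# SentencePiece tokenizer for XGLM: keeps spm ids aligned with the original fairseq vocab
# (offset of 1 plus <madeupword> placeholders) and supports pickling via the serialized model proto.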
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 | 1 |
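# Utility script: tokenizes the train and val SeqaSeqDataset splits in batches with a
# DataLoader and pickles the per-example max sequence lengths to each dataset's len_file.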
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str]=1_0_2_4 , lowerCamelCase_ : Optional[Any]=1_0_2_4 , lowerCamelCase_ : Optional[Any]=False , **lowerCamelCase_ : str ):
__a : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
__a : Tuple = SeqaSeqDataset(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , type_path='train' , **lowerCamelCase_ )
__a : Dict = tok.pad_token_id
def get_lens(lowerCamelCase_ : int ):
__a : Tuple = tqdm(
DataLoader(lowerCamelCase_ , batch_size=5_1_2 , num_workers=8 , shuffle=lowerCamelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__a : Optional[int] = []
for batch in dl:
__a : int = batch['input_ids'].ne(lowerCamelCase_ ).sum(1 ).tolist()
__a : Optional[Any] = batch['labels'].ne(lowerCamelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCamelCase_ , lowerCamelCase_ ):
max_lens.append(max(lowerCamelCase_ , lowerCamelCase_ ) )
else:
max_lens.extend(lowerCamelCase_ )
return max_lens
__a : int = get_lens(lowerCamelCase_ )
__a : List[Any] = SeqaSeqDataset(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , type_path='val' , **lowerCamelCase_ )
__a : Any = get_lens(lowerCamelCase_ )
pickle_save(lowerCamelCase_ , train_ds.len_file )
pickle_save(lowerCamelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 47 |
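# Conversion script: loads an original VisualBERT .th state dict, renames its keys to the
# transformers layout, and saves the VisualBert* model matching the checkpoint's task.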
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
SCREAMING_SNAKE_CASE__ = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ):
__a : str = torch.load(lowerCamelCase_ , map_location='cpu' )
return sd
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=rename_keys_prefix ):
__a : Optional[Any] = OrderedDict()
__a : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__a : List[Any] = key
for name_pair in rename_keys_prefix:
__a : List[str] = new_key.replace(name_pair[0] , name_pair[1] )
__a : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it is filled in from `cls.predictions.bias` here
__a : int = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__a : Dict = 'pretraining'
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
__a : int = {'visual_embedding_dim': 2_0_4_8}
elif "vqa" in checkpoint_path:
__a : Tuple = {'visual_embedding_dim': 2_0_4_8}
elif "nlvr" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 1_0_2_4}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
__a : Any = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
__a : Any = {'visual_embedding_dim': 2_0_4_8}
__a : List[str] = 'vqa_advanced'
elif "vqa" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
__a : List[Any] = 'vqa'
elif "nlvr" in checkpoint_path:
__a : Optional[int] = {
'visual_embedding_dim': 1_0_2_4,
'num_labels': 2,
}
__a : Optional[Any] = 'nlvr'
__a : str = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
__a : str = load_state_dict(lowerCamelCase_ )
__a : str = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
__a : Optional[Any] = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
__a : Any = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
__a : int = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
__a : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
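# Implementation of the `transformers-cli env` command: gathers platform info and the
# installed versions of torch, TensorFlow, Flax/JAX, accelerate and safetensors for bug reports.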
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return EnvironmentCommand()
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCamelCase( __lowerCamelCase ):
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : ArgumentParser ):
'''simple docstring'''
__a : Any = parser.add_parser('env' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
download_parser.add_argument(
'--accelerate-config_file' , default=SCREAMING_SNAKE_CASE__ , help='The accelerate config file to use for the default values in the launching script.' , )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : Optional[Any] = accelerate_config_file
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : int = 'not installed'
if is_safetensors_available():
import safetensors
__a : Dict = safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
__a : Tuple = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__a : Dict = 'not installed'
__a : Any = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__a : Tuple = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE__ ):
__a : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
__a : List[Any] = (
'\n'.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else f'''\t{accelerate_config}'''
)
__a : str = 'not installed'
__a : Union[str, Any] = 'NA'
if is_torch_available():
import torch
__a : Union[str, Any] = torch.__version__
__a : Union[str, Any] = torch.cuda.is_available()
__a : str = 'not installed'
__a : str = 'NA'
if is_tf_available():
import tensorflow as tf
__a : Tuple = tf.__version__
try:
# deprecated in v2.1
__a : Dict = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__a : Tuple = bool(tf.config.list_physical_devices('GPU' ) )
__a : str = 'not installed'
__a : Union[str, Any] = 'not installed'
__a : Optional[int] = 'not installed'
__a : int = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
__a : Optional[Any] = flax.__version__
__a : Tuple = jax.__version__
__a : Optional[Any] = jaxlib.__version__
__a : Optional[int] = jax.lib.xla_bridge.get_backend().platform
__a : str = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': f'''{safetensors_version}''',
'Accelerate version': f'''{accelerate_version}''',
'Accelerate config': f'''{accelerate_config_str}''',
'PyTorch version (GPU?)': f'''{pt_version} ({pt_cuda_available})''',
'Tensorflow version (GPU?)': f'''{tf_version} ({tf_cuda_available})''',
'Flax version (CPU?/GPU?/TPU?)': f'''{flax_version} ({jax_backend})''',
'Jax version': f'''{jax_version}''',
'JaxLib version': f'''{jaxlib_version}''',
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(SCREAMING_SNAKE_CASE__ ) )
return info
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 47 |
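# A self-reproducing one-liner (quine): the lambda %-formats its own source back into itself.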
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 47 | 1 |
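# Conversion script for UperNet semantic-segmentation checkpoints with ConvNeXt backbones:
# downloads mmsegmentation weights, renames keys, and verifies the logits on an ADE20k image.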
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : List[str] = 3_8_4
if "tiny" in model_name:
__a : Optional[int] = [3, 3, 9, 3]
__a : Tuple = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
__a : Any = [3, 3, 2_7, 3]
__a : List[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
__a : str = [3, 3, 2_7, 3]
__a : Dict = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
__a : Optional[int] = 5_1_2
if "large" in model_name:
__a : Tuple = [3, 3, 2_7, 3]
__a : str = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
__a : Dict = 7_6_8
if "xlarge" in model_name:
__a : int = [3, 3, 2_7, 3]
__a : Optional[Any] = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
__a : Optional[int] = 1_0_2_4
# set label information
__a : Tuple = 1_5_0
__a : Optional[int] = 'huggingface/label-files'
__a : Any = 'ade20k-id2label.json'
__a : Dict = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
__a : Union[str, Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__a : Any = {v: k for k, v in idalabel.items()}
__a : Optional[int] = ConvNextConfig(
depths=lowerCamelCase_ , hidden_sizes=lowerCamelCase_ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__a : Optional[int] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
__a : int = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
__a : List[str] = dct.pop(lowerCamelCase_ )
__a : Optional[Any] = val
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ):
__a : str = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
__a : Any = model_name_to_url[model_name]
__a : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['state_dict']
__a : Optional[int] = get_upernet_config(lowerCamelCase_ )
__a : Any = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__a : Dict = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
__a : Dict = key.replace('bn' , 'batch_norm' )
__a : int = val
# rename keys
__a : Dict = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
__a : str = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
__a : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' )
__a : Union[str, Any] = SegformerImageProcessor()
__a : str = processor(lowerCamelCase_ , return_tensors='pt' ).pixel_values
with torch.no_grad():
__a : Union[str, Any] = model(lowerCamelCase_ )
if model_name == "upernet-convnext-tiny":
__a : Optional[Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__a : Tuple = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__a : Optional[int] = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__a : str = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__a : int = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 47 |
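# Tests for RagTokenizer: builds tiny DPR (WordPiece) and BART (BPE) tokenizers on disk,
# round-trips save_pretrained/from_pretrained, and smoke-tests the facebook/rag-* tokenizers.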
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = tempfile.mkdtemp()
__a : int = 8
# DPR tok
__a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : List[str] = {'unk_token': '<unk>'}
__a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' )
__a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__a : List[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__a : Union[str, Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 47 | 1 |
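# Conversion script for a SpeechT5 HiFi-GAN vocoder checkpoint: copies the weight-normed
# generator parameters into SpeechTaHifiGan and stores the mel mean/scale stats from stats.npy.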
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('''transformers.models.speecht5''')
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] ):
hf_model.apply_weight_norm()
__a : int = checkpoint['input_conv.weight_g']
__a : Optional[Any] = checkpoint['input_conv.weight_v']
__a : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
__a : Tuple = checkpoint[f'''upsamples.{i}.1.weight_g''']
__a : Tuple = checkpoint[f'''upsamples.{i}.1.weight_v''']
__a : Optional[Any] = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__a : Optional[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
__a : Tuple = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
__a : int = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
__a : List[str] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
__a : str = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
__a : Optional[Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
__a : List[str] = checkpoint['output_conv.1.weight_g']
__a : int = checkpoint['output_conv.1.weight_v']
__a : List[str] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[Any]=None , ):
if config_path is not None:
__a : List[Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_ )
else:
__a : Optional[Any] = SpeechTaHifiGanConfig()
__a : Tuple = SpeechTaHifiGan(lowerCamelCase_ )
__a : Optional[int] = torch.load(lowerCamelCase_ )
load_weights(orig_checkpoint['model']['generator'] , lowerCamelCase_ , lowerCamelCase_ )
__a : List[Any] = np.load(lowerCamelCase_ )
__a : Optional[Any] = stats[0].reshape(-1 )
__a : str = stats[1].reshape(-1 )
__a : Union[str, Any] = torch.from_numpy(lowerCamelCase_ ).float()
__a : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).float()
model.save_pretrained(lowerCamelCase_ )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 47 |
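# SentencePiece tokenizer for BertGeneration (bert_for_seq_generation): piece<->id
# conversion, detokenization that respects special tokens, and vocab-file saving.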
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : int = vocab_file
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Optional[Any] = []
__a : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 | 1 |
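# UMT5 model configuration plus its ONNX export config (seq2seq inputs with optional
# past key values, default opset 13).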
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''umt5'''
__SCREAMING_SNAKE_CASE : List[Any] = ['''past_key_values''']
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str=2_5_0_1_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : Tuple=6_4 , SCREAMING_SNAKE_CASE__ : str=1_0_2_4 , SCREAMING_SNAKE_CASE__ : List[str]=8 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[str]=6 , SCREAMING_SNAKE_CASE__ : Any=3_2 , SCREAMING_SNAKE_CASE__ : int=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]="gated-gelu" , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]="T5Tokenizer" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : List[str]=0 , **SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=SCREAMING_SNAKE_CASE__ , tokenizer_class=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : str = vocab_size
__a : Optional[Any] = d_model
__a : Union[str, Any] = d_kv
__a : List[Any] = d_ff
__a : Tuple = num_layers
__a : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Optional[int] = num_heads
__a : List[Any] = relative_attention_num_buckets
__a : Any = relative_attention_max_distance
__a : List[Any] = dropout_rate
__a : int = layer_norm_epsilon
__a : str = initializer_factor
__a : int = feed_forward_proj
__a : List[str] = use_cache
__a : Dict = self.feed_forward_proj.split('-' )
__a : Tuple = act_info[-1]
__a : List[str] = act_info[0] == 'gated'
if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__a : Union[str, Any] = 'gelu_new'
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.d_model
@property
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.num_heads
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return self.num_layers
class _UpperCamelCase( __lowerCamelCase ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__a : Optional[int] = 'past_encoder_sequence + sequence'
__a : int = {0: 'batch'}
__a : str = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__a : Tuple = {0: 'batch', 1: 'decoder_sequence'}
__a : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 1_3
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return 5e-4
| 47 |
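# Backend-guard "dummy" classes: each raises a helpful error via requires_backends when
# torch, transformers or onnx is missing, so failures surface at use time rather than import.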
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 47 | 1 |
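# SentencePiece tokenizer for GPT-SW3: strips non-printing characters, normalizes
# whitespace and applies NFC before encoding, with model-dependent special-token defaults.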
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__a : List[Any] = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'
' if you are testing the model, this can safely be ignored' )
__a : str = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__a : List[Any] = '<|endoftext|>' if eos_token is None else eos_token
__a : List[str] = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__a : str = unk_token if pad_token is None else pad_token
__a : List[Any] = eos_token if bos_token is None else bos_token
else:
__a : Optional[Any] = '<pad>' if pad_token is None else pad_token
__a : Tuple = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : str = do_lower_case
__a : Dict = remove_space
__a : Any = keep_accents
__a : Optional[Any] = vocab_file
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
# Used for whitespace normalization in input texts
# fmt: off
__a : int = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__a : List[Any] = re.compile(
f'''[{"".join(map(SCREAMING_SNAKE_CASE__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]''' )
def __getstate__( self : Optional[int] ):
'''simple docstring'''
__a : List[Any] = self.__dict__.copy()
__a : Union[str, Any] = None
return state
def __setstate__( self : Any , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Optional[Any] = self.non_printing_characters_re.sub('' , SCREAMING_SNAKE_CASE__ )
# Normalize whitespaces
__a : int = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__a : Union[str, Any] = unicodedata.normalize('NFC' , SCREAMING_SNAKE_CASE__ )
return text
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
        return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=str )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
@staticmethod
    def __lowerCAmelCase ( out_string : str ):
        '''simple docstring'''
        return out_string
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : Any = []
__a : List[Any] = ''
__a : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
__a : int = True
__a : Tuple = []
else:
                current_sub_tokens.append(token )
__a : str = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Optional[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
__a : Any = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : Union[str, bool] = False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
__a : Dict = self.sp_model.encode(SCREAMING_SNAKE_CASE__ )
else:
__a : Optional[Any] = [self.preprocess_text(SCREAMING_SNAKE_CASE__ ) for t in text]
__a : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ )
if return_tensors is True or return_tensors == "pt":
__a : Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
return token_ids
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : "Conversation" ):
'''simple docstring'''
__a : int = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__a : int = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(SCREAMING_SNAKE_CASE__ ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=SCREAMING_SNAKE_CASE__ )
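# Hedged usage sketch for the tokenizer above. It mirrors the upstream
# transformers GPTSw3Tokenizer API (encode_fast/decode_fast are the last two
# public methods defined above; the checkpoint id comes from the map above):
#
#     tok = GPTSw3Tokenizer.from_pretrained('AI-Sweden/gpt-sw3-126m')
#     ids = tok.encode_fast('Träd är fina')                          # List[int]
#     ids_pt = tok.encode_fast('Träd är fina', return_tensors='pt')  # torch.Tensor
#     tok.decode_fast(ids)                                           # 'Träd är fina'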
| 47 |
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ):
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
    tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _UpperCamelCase( __lowerCamelCase ):
    @parameterized.expand(params , name_func=custom_name_func )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , )
    @parameterized.expand(params , name_func=custom_name_func )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ):
'''simple docstring'''
__a : Dict = models[model]
__a : str = self.run_trainer(
stage=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , eval_steps=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , )
self.do_checks(SCREAMING_SNAKE_CASE__ )
return output_dir
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ):
'''simple docstring'''
__a : Union[str, Any] = self.get_auto_remove_tmp_dir('./xxx' , after=SCREAMING_SNAKE_CASE__ )
__a : int = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a : Tuple = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
__a : Any = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
__a : str = self.get_launcher(SCREAMING_SNAKE_CASE__ )
__a : int = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
return output_dir
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int=False ):
'''simple docstring'''
__a : Optional[int] = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
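# Illustrative note: the Cartesian grid built above pairs each ZeRO stage with
# each model key, so every parameterized test method expands into four sub-tests:
#
#     >>> import itertools
#     >>> list(itertools.product(['zero2', 'zero3'], ['base', 'robust']))
#     [('zero2', 'base'), ('zero2', 'robust'), ('zero3', 'base'), ('zero3', 'robust')]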
| 47 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
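# Worked example of the feature-size arithmetic above (pure Python, no
# dependencies; the toy settings are assumptions for illustration only:
# input_size=1, lags [1..7], one static categorical feature of cardinality 3,
# num_time_features=2, no dynamic or static real features):
if __name__ == "__main__":
    cardinality = [3]
    embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in cardinality]  # -> [2]
    number_of_features = sum(embedding_dimension ) + 0 + 2 + 0 + 1 * 2  # + dynamic real, time, static real, loc/scale
    feature_size = 1 * 7 + number_of_features  # input_size * len(lags_sequence) + _number_of_features
    print(embedding_dimension , number_of_features , feature_size )  # [2] 6 13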
| 47 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
SCREAMING_SNAKE_CASE__ = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ["input_ids"]
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : int = RESOURCE_FILES_NAMES
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : List[str]="utf8" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , vocab_file=SCREAMING_SNAKE_CASE__ , encoding=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : Optional[int] = do_lower_case
__a : List[str] = sentencepiece_model_ckpt
__a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__a : Optional[Any] = self.load_vocab(filepath=SCREAMING_SNAKE_CASE__ )
else:
__a : int = {self.sp_model.id_to_piece(SCREAMING_SNAKE_CASE__ ): id for id in range(self.sp_model.get_piece_size() )}
__a : str = {v: k for k, v in self.vocab.items()}
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
if text is None:
return None
__a : List[Any] = self.tokenize(SCREAMING_SNAKE_CASE__ )
__a , __a : Union[str, Any] = '', []
for i, ch in enumerate(SCREAMING_SNAKE_CASE__ ):
if ch in self.SP_CHAR_MAPPING:
                __a : List[str] = self.SP_CHAR_MAPPING.get(ch )
            else:
                __a : Any = unicodedata.normalize('NFKC' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
__a , __a , __a : str = normalized_text, [], 0
if self.do_lower_case:
__a : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__a : List[Any] = token[1:]
            __a : Optional[Any] = text[offset:].index(token ) + offset
            __a : List[Any] = start + len(token )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__a : Tuple = end
return token_mapping
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Tuple ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for c in text) )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Tuple=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get('enable_sampling' ) is True:
__a : Dict = True
if self.sp_model_kwargs.get('alpha' ) is not None:
__a : int = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
__a : Any = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
__a : int = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__ )
else:
            __a : Optional[Any] = self.sp_model.SampleEncodeAsPieces(SCREAMING_SNAKE_CASE__ , nbest_size , alpha )
__a : str = []
for pi, piece in enumerate(SCREAMING_SNAKE_CASE__ ):
if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
continue
else:
continue
__a : Optional[int] = 0
for i, chunk in enumerate(SCREAMING_SNAKE_CASE__ ):
if chunk == SPIECE_UNDERLINE:
continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
__a : Union[str, Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__a : int = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__a : Dict = i
if len(SCREAMING_SNAKE_CASE__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
        __a : int = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : str = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
        __a : Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
return self.vocab.get(SCREAMING_SNAKE_CASE__ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return self.reverse_vocab.get(SCREAMING_SNAKE_CASE__ , self.unk_token )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a : Union[str, Any] = [self.cls_token_id]
__a : Union[str, Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(SCREAMING_SNAKE_CASE__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(SCREAMING_SNAKE_CASE__ ) + 1) + [1] * (len(SCREAMING_SNAKE_CASE__ ) + 3)
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(SCREAMING_SNAKE_CASE__ ) == 1:
__a : str = unicodedata.category(SCREAMING_SNAKE_CASE__ )
if cat == "Zs":
return True
return False
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = {}
with io.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(SCREAMING_SNAKE_CASE__ ):
__a : Optional[Any] = line.rstrip('\n' )
__a : Dict = int(SCREAMING_SNAKE_CASE__ )
return token_to_idx
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
__a : List[Any] = 0
if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__a : Union[str, Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
__a : List[Any] = token_index
writer.write(token + '\n' )
index += 1
        tokenizer_model_file = os.path.join(SCREAMING_SNAKE_CASE__ , 'sentencepiece.bpe.model' )
        with open(tokenizer_model_file , 'wb' ) as fi:
__a : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (vocab_file,)
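# Worked example of the token_type_ids layout built above for a sentence pair,
# [CLS] A [SEP] [SEP] B [SEP], using toy ids (len(A)=3, len(B)=2):
if __name__ == "__main__":
    token_ids_a = [1_1, 1_2, 1_3]
    token_ids_b = [2_1, 2_2]
    type_ids = [0] * (len(token_ids_a ) + 1 ) + [1] * (len(token_ids_b ) + 3 )
    print(type_ids )  # [0, 0, 0, 0, 1, 1, 1, 1, 1]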
| 47 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : List[Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : List[str] = 1_0, 0.0
__a : Dict = self.dummy_model()
__a : str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
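# Hedged sketch of the denoising loop these tests exercise (assumes the
# diffusers DDIMParallelScheduler API imported above; the model call is a
# random stand-in, not a real denoiser):
#
#     scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.randn_like(sample)  # stand-in for model(sample, t)
#         sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample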
| 47 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
__a : Tuple = torch.nn.Linear(1_0 , 1_0 )
__a : Union[str, Any] = torch.optim.SGD(model.parameters() , 0.1 )
__a : Dict = Accelerator()
__a : Union[str, Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 47 |
def UpperCAmelCase__ ( equation1 : list[int] , equation2 : list[int] ):
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
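# Example: x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0):
if __name__ == "__main__":
    print(UpperCAmelCase__([1, 2, 3] , [2, 1, 3] ) )  # (1.0, 1.0)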
| 47 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
SCREAMING_SNAKE_CASE__ = False
@skip_mps
class _UpperCamelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionAttendAndExcitePipeline
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , )
__a : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
__a : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__a : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__a : Tuple = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
__a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
__a : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__a : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__a : List[str] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = 'cpu'
__a : Optional[int] = self.get_dummy_components()
__a : Union[str, Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__a : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__a : int = pipe(**SCREAMING_SNAKE_CASE__ ).images
__a : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
__a : Optional[Any] = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
__a : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCamelCase( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls : str ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Tuple ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : List[str] = torch.manual_seed(5_1 )
__a : Optional[int] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.to('cuda' )
__a : List[str] = 'a painting of an elephant with glasses'
__a : Any = [5, 7]
__a : Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE__ , token_indices=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
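# Hedged usage sketch of the pipeline under test (assumes the diffusers
# StableDiffusionAttendAndExcitePipeline API; checkpoint, prompt and token
# indices are taken from the slow test above):
#
#     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#         'CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16).to('cuda')
#     image = pipe(prompt='a painting of an elephant with glasses',
#                  token_indices=[5, 7], guidance_scale=7.5,
#                  num_inference_steps=50).images[0]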
| 47 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = '''MCTCTFeatureExtractor'''
__SCREAMING_SNAKE_CASE : Tuple = '''AutoTokenizer'''
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : int = self.feature_extractor
__a : Union[str, Any] = False
def __call__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__a : Optional[Any] = kwargs.pop('raw_speech' )
else:
__a : Union[str, Any] = kwargs.pop('audio' , SCREAMING_SNAKE_CASE__ )
__a : Tuple = kwargs.pop('sampling_rate' , SCREAMING_SNAKE_CASE__ )
__a : Any = kwargs.pop('text' , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__a : List[str] = args[0]
__a : Tuple = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__a : Union[str, Any] = self.feature_extractor(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None:
__a : Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a : List[str] = encodings['input_ids']
return inputs
def __lowerCAmelCase ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Dict = kwargs.pop('input_features' , SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = kwargs.pop('labels' , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__a : Any = args[0]
__a : Union[str, Any] = args[1:]
if input_features is not None:
__a : Tuple = self.feature_extractor.pad(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if labels is not None:
__a : int = self.tokenizer.pad(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__a : str = labels['input_ids']
return input_features
def __lowerCAmelCase ( self : Dict , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@contextmanager
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
__a : Any = True
__a : str = self.tokenizer
yield
__a : List[str] = self.feature_extractor
__a : Any = False
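# Hedged sketch of the call pattern implemented above (assumes the deprecated
# transformers MCTCTProcessor API; the checkpoint id is an assumption):
#
#     processor = MCTCTProcessor.from_pretrained('speechbrain/m-ctc-t-large')
#     batch = processor(audio=waveform, sampling_rate=16000, text=transcript)
#     # audio is routed to the feature extractor, text to the tokenizer, and
#     # the tokenized ids are attached to the audio features as labels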
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence : str ):
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
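# Quick self-check of the behaviour implemented above: only a leading
# lowercase ASCII letter is uppercased, everything else passes through.
if __name__ == "__main__":
    assert capitalize('hello world') == 'Hello world'
    assert capitalize('123 hello') == '123 hello'
    assert capitalize('') == ''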
| 47 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''allenai/led-base-16384''': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
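# Sanity-check sketch for the table above: it is a bijection over all 256 byte
# values; printable bytes map to themselves, the rest shift past U+00FF:
#
#     table = bytes_to_unicode()
#     assert len(table) == 256 == len(set(table.values()))
#     table[ord('A')] == 'A' and table[0] == chr(256)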
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
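# Example: symbol pairs extracted for BPE merging from a word tuple:
#
#     get_pairs(('h', 'e', 'l', 'l', 'o'))
#     # -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}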
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple="replace" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : int="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE__ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
__a : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
__a : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
__a : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
__a : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
__a : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
__a : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as vocab_handle:
__a : List[Any] = json.load(SCREAMING_SNAKE_CASE__ )
__a : List[str] = {v: k for k, v in self.encoder.items()}
__a : Dict = errors # how to handle errors in decoding
__a : List[Any] = bytes_to_unicode()
__a : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as merges_handle:
__a : str = merges_handle.read().split('\n' )[1:-1]
__a : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__a : str = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : Tuple = {}
__a : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a : Optional[Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__a : List[Any] = tuple(SCREAMING_SNAKE_CASE__ )
__a : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
__a : Tuple = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__a , __a : str = bigram
__a : Optional[Any] = []
__a : List[str] = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
__a : str = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a : List[Any] = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a : Optional[int] = tuple(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
__a : Dict = get_pairs(SCREAMING_SNAKE_CASE__ )
__a : Tuple = ' '.join(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = word
return word
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
__a : int = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
__a : Optional[Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(' ' ) )
return bpe_tokens
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
__a : Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : int = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__a : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '\n' )
__a : List[Any] = 0
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
__a : Optional[int] = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE__ ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a : Union[str, Any] = [self.cls_token_id]
__a : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Any = [self.sep_token_id]
__a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Dict = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
__a : Tuple = ' ' + text
return (text, kwargs)
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[Dict[str, EncodedInput], BatchEncoding] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ):
'''simple docstring'''
__a : Union[str, Any] = super()._pad(
encoded_inputs=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
# Load from model defaults
if return_attention_mask is None:
__a : Tuple = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__a : Optional[Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__a : int = len(encoded_inputs['global_attention_mask'] ) != len(SCREAMING_SNAKE_CASE__ )
if needs_to_be_padded:
__a : Any = len(SCREAMING_SNAKE_CASE__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__a : Optional[Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__a : List[str] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
                    raise ValueError('Invalid padding strategy: ' + str(self.padding_side ) )
return encoded_inputs
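# --- Editor's addition: a hedged, self-contained sketch of the byte-pair merge
# loop implemented by the `bpe` method above. It repeatedly merges the adjacent
# symbol pair with the lowest merge rank until no ranked pair remains. The toy
# `ranks` table is hypothetical and exists only for this illustration.
def _toy_bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # pick the pair with the best (lowest) merge rank
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float('inf')))
        if bigram not in ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # merge the pair into one symbol
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return ' '.join(word)

# Example: _toy_bpe('low', {('l', 'o'): 0, ('lo', 'w'): 1}) == 'low'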
| 47 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
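# Editor's note (hedged): the property above returns the feature extractor's
# overall downsampling factor, i.e. the product of the convolutional strides.
# With the default strides declared in __init__ and the functools/operator
# imports at the top of this file:
#     functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320  # 5 * 2**6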
| 47 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__a : List[str] = k.replace(lowerCamelCase_ , lowerCamelCase_ )
if k.startswith('encoder' ):
__a : str = k.replace('.attn' , '.self_attn' )
__a : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__a : Any = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__a : int = k.replace('norm1' , 'self_attn_layer_norm' )
__a : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
__a : Optional[Any] = k.replace('norm3' , 'final_layer_norm' )
return k
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] ):
__a : str = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__a : Optional[int] = sd.pop(lowerCamelCase_ )
__a : List[Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__a : Tuple = v
SCREAMING_SNAKE_CASE__ = ['''START''']
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
__a : str = torch.load(lowerCamelCase_ , map_location='cpu' )
__a : Any = model['model']
__a : Optional[int] = BlenderbotConfig.from_json_file(lowerCamelCase_ )
__a : Any = BlenderbotForConditionalGeneration(lowerCamelCase_ )
__a : Dict = m.model.state_dict().keys()
__a : Tuple = []
__a : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__a : Union[str, Any] = rename_state_dict_key(lowerCamelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__a : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCamelCase_ )
m.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
m.half()
m.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
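# Editor's addition: an illustrative invocation of the conversion script above.
# The script and checkpoint file names are placeholders, not paths shipped with
# any particular release:
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json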
| 47 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (position - 1) // 2
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 1
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 2
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[str] ):
'''simple docstring'''
__a : list[tuple[T, int]] = []
__a : dict[T, int] = {}
__a : int = 0
def __len__( self : Any ):
'''simple docstring'''
return self.elements
def __repr__( self : Any ):
'''simple docstring'''
return str(self.heap )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.elements == 0
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
__a : List[Any] = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a : Union[str, Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a : Dict = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
__a : str = (elem, weight)
if position > 0:
__a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : Dict = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
if curr_pos == 0:
return None
__a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : str = self.heap[curr_pos]
__a , __a : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : int = self.position_map[elem]
__a , __a : Optional[Any] = self.heap[curr_pos]
__a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a : str = self.heap[child_left_position]
__a , __a : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__a , __a : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__a , __a : Union[str, Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.heap[nodea_pos][0]
__a : str = self.heap[nodea_pos][0]
__a , __a : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a : str = nodea_pos
__a : Optional[int] = nodea_pos
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[Any] ):
'''simple docstring'''
__a : dict[T, dict[T, int]] = {}
__a : int = 0
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Dict ):
'''simple docstring'''
return self.nodes
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
if node not in self.connections:
__a : Tuple = {}
self.nodes += 1
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = weight
__a : Any = weight
def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ):
__a : dict[T, int] = {node: maxsize for node in graph.connections}
__a : dict[T, T | None] = {node: None for node in graph.connections}
__a : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase_ , lowerCamelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a : Optional[int] = priority_queue.extract_min()
__a : int = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Optional[int] = node
    # running Prim's algorithm
while not priority_queue.is_empty():
__a : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Dict = node
return dist, parent
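# --- Editor's addition: since the helper names above are obfuscated, here is a
# compact, self-contained heapq version of the same algorithm for reference. It
# returns the total weight of a minimum spanning tree for a graph given as an
# adjacency dict {u: {v: weight}}.
import heapq

def _toy_prim(connections: dict) -> int:
    start = next(iter(connections))
    visited = {start}
    heap = [(w, v) for v, w in connections[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(connections):
        w, u = heapq.heappop(heap)
        if u in visited:
            continue  # stale entry for an already-connected node
        visited.add(u)
        total += w
        for v, w_next in connections[u].items():
            if v not in visited:
                heapq.heappush(heap, (w_next, v))
    return total

# _toy_prim({'a': {'b': 3, 'c': 15}, 'b': {'a': 3, 'c': 10}, 'c': {'a': 15, 'b': 10}}) == 13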
| 47 | 1 |
def UpperCAmelCase__ ( lowerCamelCase_ : list[list] ):
__a : List[str] = current_set.copy()
for row_index, row in enumerate(lowerCamelCase_ ):
__a : Optional[int] = row[0]
for column_index, column in enumerate(lowerCamelCase_ ):
if magnitude == 0:
__a : Optional[Any] = column
continue
__a : Any = column / magnitude
# Subtract to cancel term
__a : Optional[int] = current_set[0]
__a : Union[str, Any] = [first_row]
__a : str = current_set[1::]
for row in current_set:
__a : List[Any] = []
        # If the first term is 0, it is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase_ )
continue
for column_index in range(len(lowerCamelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__a : int = final_set[0]
__a : List[Any] = []
__a : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__a : int = simplify(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCamelCase_ )
__a : List[str] = resultant
return final_set
def UpperCAmelCase__ ( lowerCamelCase_ : list[list] ):
if len(lowerCamelCase_ ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
__a : Tuple = len(lowerCamelCase_ ) + 1
if any(len(lowerCamelCase_ ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
if any(not isinstance(lowerCamelCase_ , (int, float) ) for column in row ):
            raise ValueError('solve_simultaneous() requires lists of integers or floats' )
if len(lowerCamelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
__a : Tuple = equations.copy()
if any(0 in row for row in data_set ):
__a : Union[str, Any] = data_set.copy()
__a : List[str] = []
for row_index, row in enumerate(lowerCamelCase_ ):
if 0 not in row:
__a : List[str] = data_set.pop(lowerCamelCase_ )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
data_set.insert(0 , lowerCamelCase_ )
__a : int = data_set.copy()
__a : List[Any] = simplify(lowerCamelCase_ )
__a : List[Any] = simplified[::-1]
__a : list = []
for row in simplified:
__a : List[str] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__a : Union[str, Any] = row.copy()[: len(lowerCamelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase_ ) == 0:
solutions.append(0 )
continue
__a : Dict = temp_row[1::]
__a : Any = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase_ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase_ )
__a : Optional[Any] = []
for item in solutions:
final.append(float(round(lowerCamelCase_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
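# Editor's note (hedged): [[4, 2]] encodes the single equation 4x = 2, so the
# second call should print [0.5]. For the 5x5 system `eq`, row i encodes
# (x_0 + ... + x_4) + x_i = 4 + i; summing all rows gives a variable sum of 5,
# so x_i = (4 + i) - 5 and the expected solution is [-1.0, 0.0, 1.0, 2.0, 3.0].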
| 47 |
from collections.abc import Sequence
from queue import Queue
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ):
'''simple docstring'''
__a : Tuple = start
__a : Dict = end
__a : List[str] = val
__a : List[Any] = (start + end) // 2
__a : Optional[Any] = left
__a : List[str] = right
def __repr__( self : Dict ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Tuple = collection
__a : Dict = function
if self.collection:
__a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] )
__a : Tuple = (start + end) // 2
__a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
__a : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : int = self.fn(node.left.val , node.right.val )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.root is not None:
__a : Tuple = Queue()
queue.put(self.root )
while not queue.empty():
__a : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
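# Editor's note: query_range(i, j) above treats both endpoints as inclusive.
# After arr.update(1, 5) the collection is [2, 5, 5, 3, 4], so for operator.add
# query_range(3, 4) = 3 + 4 = 7 and query_range(1, 3) = 5 + 5 + 3 = 13, matching
# the expected outputs noted in the comments above.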
| 47 | 1 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase_ : list , lowerCamelCase_ : int | None = None , lowerCamelCase_ : int | None = None ):
if start is None:
__a : Optional[int] = 0
if end is None:
__a : Dict = len(lowerCamelCase_ ) - 1
if start >= end:
return
__a : List[Any] = (start + end) // 2
slowsort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
slowsort(lowerCamelCase_ , mid + 1 , lowerCamelCase_ )
if sequence[end] < sequence[mid]:
__a , __a : Dict = sequence[mid], sequence[end]
slowsort(lowerCamelCase_ , lowerCamelCase_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
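# Editor's note (hedged): slowsort sorts the list in place over the inclusive
# range [start, end]; upstream this entry point is named `slowsort` (here
# obfuscated as `UpperCAmelCase__`), and the recursive calls assume that name:
#     seq = [3, 1, 2]
#     slowsort(seq)
#     assert seq == [1, 2, 3]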
| 47 |
import os
import posixpath
import shutil  # used below when moving shards out of the working directory
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCamelCase( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ):
import pyspark
def generate_fn():
__a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
__a : Optional[Any] = partition_df.collect()
__a : Union[str, Any] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _UpperCamelCase( _BaseExamplesIterable ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ):
'''simple docstring'''
__a : List[str] = df
__a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
__a : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ):
'''simple docstring'''
__a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.partition_order )
class _UpperCamelCase( datasets.DatasetBuilder ):
__SCREAMING_SNAKE_CASE : List[str] = SparkConfig
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
import pyspark
__a : int = pyspark.sql.SparkSession.builder.getOrCreate()
__a : Optional[int] = df
__a : List[Any] = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
            __a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE__ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__a : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__a : List[str] = self.df.count()
__a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__a : List[str] = (
self.df.limit(SCREAMING_SNAKE_CASE__ )
.repartition(1 )
.mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__a : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) )
__a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
import pyspark
__a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter
__a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath
__a : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__a : List[str] = self.config.features
__a : int = self._writer_batch_size
__a : Union[str, Any] = self._fs.storage_options
def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__a : Any = pyspark.TaskContext().taskAttemptId()
__a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__a : Any = 0
__a : List[str] = writer_class(
features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__a , __a : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__a : Optional[Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Union[str, Any] = pa.Table.from_batches([batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
if writer._num_bytes > 0:
__a , __a : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ):
__a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) )
shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = (
self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
self._validate_cache_dir()
__a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = not is_remote_filesystem(self._fs )
__a : Optional[Any] = os.path.join if is_local else posixpath.join
__a : Any = '-TTTTT-SSSSS-of-NNNNN'
__a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ )
__a : Any = 0
__a : Dict = 0
__a : int = 0
__a : List[str] = []
__a : Optional[int] = []
for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __a , __a , __a , __a : Optional[int] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ )
__a : List[str] = total_num_examples
__a : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__a : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__a : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ):
rename(
SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
__a : Union[str, Any] = []
__a : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__a , __a : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect()
else:
# don't use any pattern
__a : List[Any] = 0
__a : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
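# Editor's addition (hedged): this builder backs `datasets.Dataset.from_spark`
# in recent `datasets` releases. A rough usage sketch -- the session and
# DataFrame below are illustrative only:
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([('hello',), ('world',)], ['text'])
#     ds = datasets.Dataset.from_spark(df)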
| 47 | 1 |
from __future__ import annotations
import math
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : bool , lowerCamelCase_ : list[int] , lowerCamelCase_ : float ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(lowerCamelCase_ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , )
def UpperCAmelCase__ ( ):
__a : int = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
__a : Optional[int] = math.log(len(lowerCamelCase_ ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
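# Editor's worked trace: with the eight leaf scores above and alternating
# max/min levels (max at the root), the pairwise maxima at the bottom are
# (90, 33, 65, 34423), the minima one level up are (33, 65), and the optimal
# root value is max(33, 65) = 65.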
| 47 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
# save results
if os.path.exists(lowerCamelCase_ ):
if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , 'config.json' ) ):
os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) )
if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) )
else:
os.makedirs(lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ):
__a : Dict = 2
if unlogit:
__a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ )
__a : Any = p * torch.log(lowerCamelCase_ )
__a : Union[str, Any] = 0
return -plogp.sum(dim=-1 )
def UpperCAmelCase__ ( lowerCamelCase_ : Any ):
logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) )
for row in range(len(lowerCamelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ):
__a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
__a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
__a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
if head_mask is None:
__a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCamelCase_ )
    # If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
__a : Any = None
__a : Optional[int] = 0.0
__a : Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__a : Dict = tuple(t.to(args.device ) for t in inputs )
((__a) , ) : Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__a , __a , __a : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCamelCase_ ):
__a : List[str] = entropy(attn.detach() , lowerCamelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__a : Optional[Any] = 2
__a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCamelCase_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCamelCase_ )
logger.info('Head ranked by importance scores' )
__a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__a : str = torch.arange(
head_importance.numel() , device=args.device )
__a : Tuple = head_ranks.view_as(lowerCamelCase_ )
print_ad_tensor(lowerCamelCase_ )
return attn_entropy, head_importance, total_loss
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ):
__a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ )
    __a : Tuple = 1 / loss # instead of a downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold )
__a : Tuple = torch.ones_like(lowerCamelCase_ )
__a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__a : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
__a : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__a : List[str] = float('Inf' )
__a : List[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCamelCase_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__a : Any = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__a : int = new_head_mask.view(-1 )
__a : Tuple = 0.0
__a : int = new_head_mask.view_as(lowerCamelCase_ )
__a : Optional[int] = new_head_mask.clone().detach()
print_ad_tensor(lowerCamelCase_ )
# Compute metric and head importance again
__a , __a , __a : int = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[Any] = 1 / loss
logger.info(
        'Masking: current score: %f, remaining heads %d (%.1f percent)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCamelCase_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
__a : List[Any] = datetime.now()
__a , __a , __a : List[str] = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[str] = 1 / loss
__a : List[Any] = datetime.now() - before_time
__a : List[str] = sum(p.numel() for p in model.parameters() )
__a : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__a : Tuple = [
v,
]
assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCamelCase_ )
__a : Optional[Any] = sum(p.numel() for p in model.parameters() )
__a : Tuple = datetime.now()
__a , __a , __a : Tuple = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , )
__a : Optional[Any] = 1 / loss
__a : List[Any] = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_0_0 )
save_model(lowerCamelCase_ , args.output_dir )
def UpperCAmelCase__ ( ):
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 )
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
__a : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__a : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__a : Union[str, Any] = torch.device('cuda' , args.local_rank )
__a : Any = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    __a : Optional[Any] = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__a : List[Any] = nn.parallel.DistributedDataParallel(
lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ )
elif args.n_gpu > 1:
__a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCamelCase_ )
# Prepare dataset
__a : Tuple = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
__a : str = (torch.from_numpy(lowerCamelCase_ ),)
__a : List[str] = TensorDataset(*lowerCamelCase_ )
__a : Optional[Any] = RandomSampler(lowerCamelCase_ )
__a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
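# Editor's addition: an illustrative invocation of the pruning script above.
# The script name, data file, and output directory are placeholders:
#     python run_prune_gpt.py \
#         --model_name_or_path gpt2 \
#         --data_dir tokens.txt \
#         --output_dir ./pruned \
#         --try_masking --masking_threshold 0.9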
| 47 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
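To see what the `_LazyModule` indirection buys: no modeling code (and hence no torch-heavy import) runs when the package itself is imported; the submodule import is deferred to the first attribute access. A hedged illustration (assuming a transformers version that ships MGP-STR at the top level):

import transformers

# Nothing from the MGP-STR submodules has been imported yet; this first
# attribute access is what triggers the lazy submodule load.
config_cls = transformers.MgpstrConfig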
| 47 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , 'models' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
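Each row of this table is applied by the `check_and_map_params` helper defined below. Stripped of the MXNet specifics, the pattern is a shape-checked copy from a flat name-to-array dict into a Transformers parameter; a generic miniature of the same idea (names are illustrative):

import torch
from torch import nn

def copy_param(hf_param: nn.Parameter, gluon_params: dict, gluon_name: str) -> nn.Parameter:
    # Look up the source tensor by its Gluon name, verify the shape, and wrap it.
    src = torch.as_tensor(gluon_params[gluon_name])
    assert hf_param.shape == src.shape, f'{gluon_name}: {tuple(src.shape)} vs {tuple(hf_param.shape)}'
    return nn.Parameter(src)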
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__a : BertSelfAttention = layer.attention.self
__a : Optional[int] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__a : str = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__a : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__a : str = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__a : Dict = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__a : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__a : BertSelfOutput = layer.attention.output
__a : Tuple = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
__a : Dict = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__a : BertIntermediate = layer.intermediate
__a : List[str] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__a : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__a : BertOutput = layer.output
__a : str = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__a : List[Any] = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__a : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__a : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
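The same conversion can also be driven programmatically once the module is imported; a sketch with placeholder paths (equivalent to the two required CLI flags above):

convert_bort_checkpoint_to_pytorch('/path/to/bort.params', '/tmp/bort-pytorch')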
| 47 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope='session' )
def UpperCAmelCase__ ( tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def UpperCAmelCase__ ( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def UpperCAmelCase__ ( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def UpperCAmelCase__ ( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def UpperCAmelCase__ ( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
assert cached_path(lowerCamelCase_ ) == text_file
def UpperCAmelCase__ ( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def UpperCAmelCase__ ( tmpfs_file ):
    output_file = get_from_cache(f'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCAmelCase__ ( ):
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCAmelCase__ ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCAmelCase__ ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCAmelCase__ ( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
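The offline-mode behavior these tests patch in can be reproduced directly; a hedged sketch that flips the same config attribute the @patch decorators target:

import datasets.config
from datasets.utils.file_utils import OfflineModeIsEnabled, cached_path

datasets.config.HF_DATASETS_OFFLINE = True
try:
    cached_path('https://huggingface.co')
except OfflineModeIsEnabled as err:
    print('Blocked as expected:', err)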
| 47 |
def apply_table(inp , table ):
    # Build the output by picking the bits of `inp` at the 1-indexed positions in `table`
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data ):
    # Rotate the bit string one position to the left
    return data[1:] + data[0]


def xor(a , b ):
    # Bitwise XOR of two equal-length bit strings
    res = ''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s , data ):
    # The outer bits (first and last) select the S-box row, the inner two bits the column
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def function(expansion , s0 , s1 , key , message ):
    # One Feistel round of simplified DES (S-DES)
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right


if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
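To make the S-box convention concrete, reusing `apply_sbox` and the S0 table from above: for the 4-bit block '1011' the outer bits '1'+'1' select row 0b11 = 3 and the inner bits '01' select column 1.

s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
print(apply_sbox(s0, '1011'))  # s0[3][1] = 1 -> '1'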
| 47 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year : int ):
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator( self ):
        '''simple docstring'''
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
    def testGradientAccumulatorDistributionStrategy( self ):
        '''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5e-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
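For context, the accumulator's intended place is a gradient-accumulation training loop; a hedged sketch in which `dataset`, `model`, `compute_loss` and `optimizer` are hypothetical placeholders:

accumulator = GradientAccumulator()
for step, batch in enumerate(dataset):
    with tf.GradientTape() as tape:
        loss = compute_loss(model, batch)
    accumulator(tape.gradient(loss, model.trainable_variables))
    if (step + 1) % 4 == 0:  # apply once every 4 micro-batches
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()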
| 47 | 1 |
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ):  # noqa: B008
        '''simple docstring'''
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self ):
        '''simple docstring'''
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525 , 1013904223 , 2 << 31 )
    while True:
        print(lcg.next_number() )
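The constants in the demo (1664525, 1013904223) are the classic Numerical Recipes LCG parameters. Because the generator is fully determined by its parameters and seed, two instances seeded identically produce identical streams (seed value chosen arbitrarily):

a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
assert [a.next_number() for _ in range(5)] == [b.next_number() for _ in range(5)]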
| 47 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = vocab_size
__a : Tuple = hidden_size
__a : List[str] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : str = hidden_act
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[Any] = max_position_embeddings
__a : Dict = type_vocab_size
__a : str = initializer_range
__a : List[str] = layer_norm_eps
__a : Optional[int] = position_embedding_type
__a : Union[str, Any] = use_cache
__a : str = classifier_dropout
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
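The dynamic-axes mapping defined by this ONNX config is what an exporter consumes; a hedged usage sketch, relying on the public `RobertaOnnxConfig` name this class corresponds to in transformers:

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig(), task='multiple-choice')
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])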
| 47 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ''''''
    __SCREAMING_SNAKE_CASE : List[str] = '''hf-legacy'''  # "hf://" is reserved for hffs
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , **SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
super().__init__(self , **SCREAMING_SNAKE_CASE__ )
__a : Dict = repo_info
__a : str = token
__a : List[Any] = None
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
if self.dir_cache is None:
__a : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : Any = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE__ ): {'name': str(SCREAMING_SNAKE_CASE__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "rb" , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
        if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__a : int = hf_hub_url(self.repo_info.id , SCREAMING_SNAKE_CASE__ , revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ , headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE__ , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
self._get_dirs()
__a : List[str] = self._strip_protocol(SCREAMING_SNAKE_CASE__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=False , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self._get_dirs()
__a : str = PurePosixPath(path.strip('/' ) )
__a : Optional[Any] = {}
for p, f in self.dir_cache.items():
__a : Union[str, Any] = PurePosixPath(p.strip('/' ) )
__a : Any = p.parent
if root == path:
__a : Union[str, Any] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
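The fairseq/spm vocab alignment sketched in the __init__ comments is easiest to verify with a concrete token; a standalone miniature of the tokenizer's token-to-id conversion above:

fairseq_offset = 1
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

def to_id(token, spm_id):
    # mirrors the fairseq-offset logic of the token-to-id method above
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

print(to_id(',', 3))  # 4 -- ',' has spm id 3 and fairseq id 4, as in the alignment table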
| 47 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Union[str, Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : int = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
__a : Dict = self.dummy_model()
__a : Optional[int] = self.dummy_sample_deter
__a : Any = self.dummy_sample_deter + 0.1
__a : Any = self.dummy_sample_deter - 0.1
__a : Optional[int] = samplea.shape[0]
__a : int = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Dict = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : Optional[int] = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Union[str, Any] = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : List[str] = len(SCREAMING_SNAKE_CASE__ )
__a : Dict = self.dummy_model()
__a : Any = self.dummy_sample_deter
__a : List[str] = torch.manual_seed(0 )
for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ):
# 1. predict noise residual
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 2. predict previous mean of sample x_t-1
__a : int = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
__a : List[Any] = pred_prev_sample
__a : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='v_prediction' )
__a : str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : str = len(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = self.dummy_model()
__a : Optional[Any] = self.dummy_sample_deter
__a : int = torch.manual_seed(0 )
for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ):
# 1. predict noise residual
__a : Dict = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# 2. predict previous mean of sample x_t-1
__a : str = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
__a : List[Any] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = self.scheduler_classes[0]
__a : Tuple = self.get_scheduler_config()
__a : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE__ ):
if i == len(SCREAMING_SNAKE_CASE__ ) - 1:
__a : List[str] = -1
else:
__a : Dict = timesteps[i + 1]
__a : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : int = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : str = [1_0_0, 8_7, 5_0, 1, 0]
__a : int = len(SCREAMING_SNAKE_CASE__ )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE__ , timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : str = self.scheduler_classes[0]
__a : Optional[int] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a : Tuple = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
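The custom-timesteps bookkeeping tested above reduces to a simple pairing: with timesteps [100, 87, 50, 1, 0], each step's predecessor is the next list entry, and the final step maps to -1.

timesteps = [100, 87, 50, 1, 0]
expected_prev = timesteps[1:] + [-1]
print(list(zip(timesteps, expected_prev)))  # the (t, prev_t) pairs the test asserts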
| 47 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    return sd
def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
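The key renaming performed by get_new_dict is just sequential str.replace over the prefix table; a quick worked example using the first three table entries:

key = 'bert.bert.embeddings.word_embeddings.weight'
for old, new in [('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls')]:
    key = key.replace(old, new)
print(key)  # visual_bert.embeddings.word_embeddings.weight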
| 47 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : str = '''xlnet'''
__SCREAMING_SNAKE_CASE : Dict = ['''mems''']
__SCREAMING_SNAKE_CASE : Dict = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=3_2_0_0_0 , SCREAMING_SNAKE_CASE__ : Any=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Dict=2_4 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_0_9_6 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : str="bi" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1e-12 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Any="last" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[Any]="tanh" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=5 , SCREAMING_SNAKE_CASE__ : List[Any]=5 , SCREAMING_SNAKE_CASE__ : int=5 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : Tuple=2 , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = vocab_size
__a : Dict = d_model
__a : str = n_layer
__a : str = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
__a : str = d_model // n_head
__a : str = ff_activation
__a : Any = d_inner
__a : Dict = untie_r
__a : List[str] = attn_type
__a : str = initializer_range
__a : Tuple = layer_norm_eps
__a : List[str] = dropout
__a : str = mem_len
__a : List[Any] = reuse_len
__a : List[Any] = bi_data
__a : str = clamp_len
__a : List[str] = same_length
__a : Optional[int] = summary_type
__a : Any = summary_use_proj
__a : List[str] = summary_activation
__a : List[Any] = summary_last_dropout
__a : Union[str, Any] = start_n_top
__a : str = end_n_top
__a : List[str] = bos_token_id
__a : Union[str, Any] = pad_token_id
__a : Tuple = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , SCREAMING_SNAKE_CASE__ , )
__a : Dict = kwargs['use_cache']
__a : List[Any] = use_mems_eval
__a : List[Any] = use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
    def max_position_embeddings( self : Optional[Any] ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 47 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
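For readers puzzling over the one-liner: `%r` splices the template's own repr back into itself and `%%` collapses to a literal `%`, so the printed text reproduces the program. The same trick spelled out:

template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)
# -> print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))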
| 47 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 47 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = tempfile.mkdtemp()
__a : int = 8
# DPR tok
__a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : List[str] = {'unk_token': '<unk>'}
__a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' )
__a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__a : List[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__a : Union[str, Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 47 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class _UpperCamelCase( __lowerCamelCase ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self : Optional[int] , **kwargs : Any ):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop('torchscript' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('fp16_opt_level' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript : bool = field(default=False , metadata={'''help''': '''Trace the models using torchscript'''} )
    torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    fp16_opt_level : str = field(
        default='''O1''' , metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        } , )
@cached_property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
            device = torch.device('cpu' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
            n_gpu = torch.cuda.device_count()
return device, n_gpu
@property
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return self.n_gpu > 0
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : int = vocab_file
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Optional[Any] = []
__a : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
    def __lowerCAmelCase ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 47 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
    def __init__( self : Union[str, Any] , vocab_file : Optional[int] , bos_token : List[Any]="<s>" , eos_token : List[str]="</s>" , sep_token : Optional[Any]="</s>" , cls_token : Union[str, Any]="<s>" , unk_token : str="<unk>" , pad_token : Dict="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : List[str] , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model )
        madeup_words = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
    def __lowerCAmelCase ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 47 |
def solution( n : int = 1_0 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    # keep only the last n digits via modular exponentiation (Project Euler 97)
    modulus = 1_0**n
    number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 47 |
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ):
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
    tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a , __a : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
__a : int = 'A painting of a squirrel eating a burger'
__a : int = jax.device_count()
__a : List[Any] = num_samples * [prompt]
__a : List[str] = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE__ )
__a : Dict = shard(SCREAMING_SNAKE_CASE__ )
__a : List[str] = jax.random.PRNGKey(0 )
__a : List[Any] = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
__a : Optional[int] = sd_pipe(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , jit=SCREAMING_SNAKE_CASE__ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
__a : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__a : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a : Union[str, Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Union[str, Any] = 'stabilityai/stable-diffusion-2'
__a , __a : str = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ , subfolder='scheduler' )
__a , __a : Dict = FlaxStableDiffusionPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , revision='bf16' , dtype=jnp.bfloat16 , )
__a : List[Any] = scheduler_params
__a : Optional[Any] = 'A painting of a squirrel eating a burger'
__a : List[Any] = jax.device_count()
__a : List[Any] = num_samples * [prompt]
__a : Dict = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE__ )
__a : str = shard(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = jax.random.PRNGKey(0 )
__a : List[str] = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
__a : int = sd_pipe(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , jit=SCREAMING_SNAKE_CASE__ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
__a : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a : Union[str, Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a : List[str] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 47 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 47 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
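# Maps args.num_image_embeds to the (height, width) grid of the adaptive average pooling applied to the CNN feature map below.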
class _UpperCamelCase( nn.Module ):
    def __init__( self : Tuple , args : List[str] ):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
        out = self.pool(self.model(SCREAMING_SNAKE_CASE__ ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class _UpperCamelCase( __lowerCamelCase ):
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
__a : str = [json.loads(SCREAMING_SNAKE_CASE__ ) for l in open(SCREAMING_SNAKE_CASE__ )]
__a : List[str] = os.path.dirname(SCREAMING_SNAKE_CASE__ )
__a : str = tokenizer
__a : int = labels
__a : Dict = len(SCREAMING_SNAKE_CASE__ )
__a : Tuple = max_seq_length
__a : int = transforms
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : Union[str, Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=SCREAMING_SNAKE_CASE__ ) )
__a , __a , __a : Any = sentence[0], sentence[1:-1], sentence[-1]
__a : List[Any] = sentence[: self.max_seq_length]
__a : str = torch.zeros(self.n_classes )
__a : Optional[Any] = 1
__a : str = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
__a : int = self.transforms(SCREAMING_SNAKE_CASE__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def UpperCAmelCase__ ( lowerCamelCase_ : Any ):
__a : int = [len(row['sentence'] ) for row in batch]
__a , __a : int = len(lowerCamelCase_ ), max(lowerCamelCase_ )
__a : Optional[int] = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
__a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ):
__a : Dict = input_row['sentence']
__a : Optional[int] = 1
__a : int = torch.stack([row['image'] for row in batch] )
__a : Union[str, Any] = torch.stack([row['label'] for row in batch] )
__a : Any = torch.stack([row['image_start_token'] for row in batch] )
__a : Tuple = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def UpperCAmelCase__ ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def UpperCAmelCase__ ( ):
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 47 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : List[Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : List[str] = 1_0, 0.0
__a : Dict = self.dummy_model()
__a : str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 47 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
__a : Optional[int] = roberta_sent_encoder.embed_tokens.weight
__a : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
__a : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__a : int = roberta_sent_encoder.layer_norm.weight
__a : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer : BertLayer = model.roberta.encoder.layer[i]
        roberta_layer : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention : RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
# self attention
__a : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__a : int = roberta_layer.self_attn.q_proj.weight
__a : Dict = roberta_layer.self_attn.q_proj.bias
__a : List[Any] = roberta_layer.self_attn.k_proj.weight
__a : Any = roberta_layer.self_attn.k_proj.bias
__a : List[str] = roberta_layer.self_attn.v_proj.weight
__a : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
        self_output : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__a : Tuple = roberta_layer.final_layer_norm.weight
__a : Dict = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
# output
        bert_output : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
__a : Union[str, Any] = roberta.model.classification_heads['mnli'].dense.weight
__a : Dict = roberta.model.classification_heads['mnli'].dense.bias
__a : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight
__a : Tuple = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__a : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
__a : Tuple = roberta.model.encoder.lm_head.dense.bias
__a : Dict = roberta.model.encoder.lm_head.layer_norm.weight
__a : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias
__a : Union[str, Any] = roberta.model.encoder.lm_head.weight
__a : Optional[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids : torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
print(our_output.shape , their_output.shape )
__a : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 47 |
def cramers_rule_2x2( equation1 : list[int] , equation2 : list[int] ):
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
return (x, y)
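# Minimal illustrative check (not part of the original file): equations are given
# as [a, b, c] for a*x + b*y = c; 2x + 3y = 8 and x + 2y = 5 intersect at (1, 2).
if __name__ == "__main__":
    print(cramers_rule_2x2([2, 3, 8], [1, 2, 5]))  # expected: (1.0, 2.0)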
| 47 | 1 |
class _UpperCamelCase:
    def __init__( self : List[Any] , size : int ):
        '''simple docstring'''
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
@staticmethod
    def get_next( index : int ):
'''simple docstring'''
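        # Setting the lowest zero bit (index | (index + 1)) moves to the next node to refresh on the way up the tree.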
return index | (index + 1)
@staticmethod
    def get_prev( index : int ):
'''simple docstring'''
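        # index & (index + 1) clears the trailing one-bits; subtracting 1 lands just before this node's block.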
return (index & (index + 1)) - 1
    def __lowerCAmelCase ( self : List[Any] , index : int , value : int ):
        '''simple docstring'''
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , current_left_border , index )
            index = self.get_next(index )
    def __lowerCAmelCase ( self : List[str] , left : int , right : int ):
        '''simple docstring'''
        right -= 1 # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 1 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase_ : int = 4 ):
__a : Any = abs(lowerCamelCase_ ) or 4
return [[1 + x + y * row_size for x in range(lowerCamelCase_ )] for y in range(lowerCamelCase_ )]
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
return reverse_row(transpose(lowerCamelCase_ ) )
# OR.. transpose(reverse_column(matrix))
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
return reverse_row(reverse_column(lowerCamelCase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
return reverse_column(transpose(lowerCamelCase_ ) )
# OR.. transpose(reverse_row(matrix))
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
__a : str = [list(lowerCamelCase_ ) for x in zip(*lowerCamelCase_ )]
return matrix
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
__a : Any = matrix[::-1]
return matrix
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
__a : Optional[Any] = [x[::-1] for x in matrix]
return matrix
def UpperCAmelCase__ ( lowerCamelCase_ : list[list[int]] ):
for i in matrix:
print(*lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
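# Quick hand check (illustrative, not part of the dataset row): rotating
# [[1, 2], [3, 4]] by 90 degrees counterclockwise via transpose-then-reverse-
# rows should yield [[2, 4], [1, 3]].
sample = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*sample)]  # [[1, 3], [2, 4]]
rotated = transposed[::-1]                        # [[2, 4], [1, 3]]
assert rotated == [[2, 4], [1, 3]]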
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 47 | 1 |
def UpperCAmelCase__ ( lowerCamelCase_ : int = 5_0 ):
__a : int = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
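# Worked check (illustrative, not part of the dataset row): the recurrence
# above reproduces the count Project Euler 114 quotes, e.g. 17 ways for a row
# of length 7. Recomputed inline so no obfuscated name needs to be called:
ways = [1] * 8
for row_length in range(3, 8):
    for block_length in range(3, row_length + 1):
        for block_start in range(row_length - block_length):
            ways[row_length] += ways[row_length - block_start - block_length - 1]
        ways[row_length] += 1
assert ways[7] == 17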
| 47 |
from string import ascii_lowercase, ascii_uppercase
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if not sentence:
return ""
    __a : Union[str, Any] = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
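# Usage sketch (illustrative; `capitalize` is a hypothetical name for the
# function above): capitalize('hello world') returns 'Hello world', and the
# empty string is passed through unchanged.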
| 47 | 1 |
from collections import deque
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : int = process_name # process name
__a : Dict = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__a : int = arrival_time
__a : Any = burst_time # remaining burst time
__a : List[str] = 0 # total time of the process wait in ready queue
__a : Union[str, Any] = 0 # time from arrival time to completion time
class _UpperCamelCase:
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
__a : Dict = number_of_queues
# time slice of queues that round robin algorithm applied
__a : int = time_slices
# unfinished process is in this ready_queue
__a : List[Any] = queue
# current time
__a : Dict = current_time
# finished process is in this sequence queue
__a : deque[Process] = deque()
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : List[Any] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : list[Process] ):
'''simple docstring'''
__a : str = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : list[Process] ):
'''simple docstring'''
__a : List[str] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : list[Process] ):
'''simple docstring'''
__a : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : deque[Process] ):
'''simple docstring'''
__a : deque[Process] = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE__ ) != 0:
__a : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__a : str = 0
# set the process's turnaround time because it is finished
__a : str = self.current_time - cp.arrival_time
# set the completion time
__a : Optional[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
__a : str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__a : Union[str, Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__a : Optional[int] = 0
# set the finish time
__a : Dict = self.current_time
# update the process' turnaround time because it is finished
__a : Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
__a , __a : Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE__ = Process('''P1''', 0, 53)
SCREAMING_SNAKE_CASE__ = Process('''P2''', 0, 17)
SCREAMING_SNAKE_CASE__ = Process('''P3''', 0, 68)
SCREAMING_SNAKE_CASE__ = Process('''P4''', 0, 24)
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [17, 25]
SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE__ = Process('''P1''', 0, 53)
SCREAMING_SNAKE_CASE__ = Process('''P2''', 0, 17)
SCREAMING_SNAKE_CASE__ = Process('''P3''', 0, 68)
SCREAMING_SNAKE_CASE__ = Process('''P4''', 0, 24)
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [17, 25]
SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE__ = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE__ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 47 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
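# Illustrative check (not part of the dataset row): the property above
# multiplies the convolutional strides together, i.e. the overall
# downsampling factor of the feature extractor; for the default strides that
# is 5 * 2**6 = 320 input samples per output frame.
_default_strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, _default_strides, 1) == 320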
| 47 | 1 |
import socket
def UpperCAmelCase__ ( ):
__a : int = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__a : Optional[Any] = socket.gethostname()
__a : int = 1_2_3_1_2
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
__a : List[Any] = sock.recv(1_0_2_4 )
if not data:
break
out_file.write(lowerCamelCase_ )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
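# A minimal matching server sketch (illustrative; the port and the greeting
# mirror the client above, everything else — names, filename — is
# hypothetical):
def serve_once(filename: str = 'file_to_send', port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((socket.gethostname(), port))
        srv.listen(1)
        conn, _addr = srv.accept()
        with conn:
            conn.recv(1024)  # consume the client's greeting
            with open(filename, 'rb') as in_file:
                while chunk := in_file.read(1024):
                    conn.sendall(chunk)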
| 47 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (position - 1) // 2
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 1
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 2
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[str] ):
'''simple docstring'''
__a : list[tuple[T, int]] = []
__a : dict[T, int] = {}
__a : int = 0
def __len__( self : Any ):
'''simple docstring'''
return self.elements
def __repr__( self : Any ):
'''simple docstring'''
return str(self.heap )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.elements == 0
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
__a : List[Any] = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a : Union[str, Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a : Dict = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
__a : str = (elem, weight)
if position > 0:
__a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : Dict = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
if curr_pos == 0:
return None
__a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : str = self.heap[curr_pos]
__a , __a : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : int = self.position_map[elem]
__a , __a : Optional[Any] = self.heap[curr_pos]
__a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a : str = self.heap[child_left_position]
__a , __a : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__a , __a : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__a , __a : Union[str, Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.heap[nodea_pos][0]
__a : str = self.heap[nodea_pos][0]
__a , __a : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a : str = nodea_pos
__a : Optional[int] = nodea_pos
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[Any] ):
'''simple docstring'''
__a : dict[T, dict[T, int]] = {}
__a : int = 0
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Dict ):
'''simple docstring'''
return self.nodes
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
if node not in self.connections:
__a : Tuple = {}
self.nodes += 1
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = weight
__a : Any = weight
def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ):
__a : dict[T, int] = {node: maxsize for node in graph.connections}
__a : dict[T, T | None] = {node: None for node in graph.connections}
__a : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase_ , lowerCamelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a : Optional[int] = priority_queue.extract_min()
__a : int = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Optional[int] = node
# running prim's algorithm
while not priority_queue.is_empty():
__a : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Dict = node
return dist, parent
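# Illustrative standalone check (hypothetical names, heapq instead of the
# custom queue above): Prim's algorithm on a triangle graph a-b (1), b-c (2),
# a-c (3) keeps the two cheapest edges, total MST weight 3.
import heapq

def prim_mst_weight(adj: dict[str, dict[str, int]], start: str) -> int:
    seen = {start}
    heap = [(w, v) for v, w in adj[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(seen) < len(adj):
        w, v = heapq.heappop(heap)
        if v in seen:
            continue
        seen.add(v)
        total += w
        for nxt, nw in adj[v].items():
            if nxt not in seen:
                heapq.heappush(heap, (nw, nxt))
    return total

triangle = {'a': {'b': 1, 'c': 3}, 'b': {'a': 1, 'c': 2}, 'c': {'a': 3, 'b': 2}}
assert prim_mst_weight(triangle, 'a') == 3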
| 47 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
if "emb" in name:
__a : Any = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__a : str = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__a : List[Any] = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__a : List[Any] = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__a : List[str] = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__a : List[str] = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__a : List[Any] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__a : str = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__a : int = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__a : Any = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__a : List[Any] = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def UpperCAmelCase__ ( lowerCamelCase_ : OrderedDict , lowerCamelCase_ : int ):
__a : Union[str, Any] = list(state_dict.keys() )
__a : Optional[int] = {}
for key in keys:
__a : Optional[int] = state_dict.pop(lowerCamelCase_ )
__a : List[Any] = rename_keys(lowerCamelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
__a : Optional[Any] = val[:hidden_size, :]
__a : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
__a : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__a : List[str] = val
else:
__a : Any = val
return state_dict, enc_dec_proj_state_dict
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if checkpoint == "small":
# default config values
__a : Union[str, Any] = 1_0_2_4
__a : Any = 2_4
__a : Tuple = 1_6
elif checkpoint == "medium":
__a : Dict = 1_5_3_6
__a : Dict = 4_8
__a : Union[str, Any] = 2_4
elif checkpoint == "large":
__a : int = 2_0_4_8
__a : Dict = 4_8
__a : Union[str, Any] = 3_2
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
__a : str = MusicgenDecoderConfig(
hidden_size=lowerCamelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , )
return config
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : List[Any]="cpu" ):
__a : str = MusicGen.get_pretrained(lowerCamelCase_ , device=lowerCamelCase_ )
__a : str = decoder_config_from_checkpoint(lowerCamelCase_ )
__a : Tuple = fairseq_model.lm.state_dict()
__a , __a : int = rename_state_dict(
lowerCamelCase_ , hidden_size=decoder_config.hidden_size )
__a : int = TaEncoderModel.from_pretrained('t5-base' )
__a : List[Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__a : Tuple = MusicgenForCausalLM(lowerCamelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__a , __a : Optional[int] = decoder.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowerCamelCase_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
__a : Union[str, Any] = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase_ , audio_encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCamelCase_ )
# check we can do a forward pass
__a : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__a : Tuple = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__a : Any = model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__a : Any = AutoTokenizer.from_pretrained('t5-base' )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__a : Union[str, Any] = MusicgenProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
# set the appropriate bos/pad token ids
__a : Tuple = 2_0_4_8
__a : int = 2_0_4_8
# set other default generation config params
__a : Union[str, Any] = int(3_0 * audio_encoder.config.frame_rate )
__a : List[Any] = True
__a : Any = 3.0
if pytorch_dump_folder is not None:
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowerCamelCase_ )
processor.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
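# Example invocation (illustrative; the script filename and output folder are
# hypothetical, the flags match the argparse definitions above):
#   python convert_musicgen.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu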
| 47 |
from collections.abc import Sequence
from queue import Queue
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ):
'''simple docstring'''
__a : Tuple = start
__a : Dict = end
__a : List[str] = val
__a : List[Any] = (start + end) // 2
__a : Optional[Any] = left
__a : List[str] = right
def __repr__( self : Dict ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Tuple = collection
__a : Dict = function
if self.collection:
__a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] )
__a : Tuple = (start + end) // 2
__a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
__a : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : int = self.fn(node.left.val , node.right.val )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.root is not None:
__a : Tuple = Queue()
queue.put(self.root )
while not queue.empty():
__a : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 47 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 47 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCamelCase( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ):
import pyspark
def generate_fn():
__a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
__a : Optional[Any] = partition_df.collect()
__a : Union[str, Any] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _UpperCamelCase( _BaseExamplesIterable ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ):
'''simple docstring'''
__a : List[str] = df
__a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
__a : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ):
'''simple docstring'''
__a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.partition_order )
class _UpperCamelCase( datasets.DatasetBuilder ):
__SCREAMING_SNAKE_CASE : List[str] = SparkConfig
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
import pyspark
__a : int = pyspark.sql.SparkSession.builder.getOrCreate()
__a : Optional[int] = df
__a : List[Any] = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE__ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__a : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__a : List[str] = self.df.count()
__a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__a : List[str] = (
self.df.limit(SCREAMING_SNAKE_CASE__ )
.repartition(1 )
.mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__a : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) )
__a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
import pyspark
__a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter
__a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath
__a : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__a : List[str] = self.config.features
__a : int = self._writer_batch_size
__a : Union[str, Any] = self._fs.storage_options
def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__a : Any = pyspark.TaskContext().taskAttemptId()
__a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__a : Any = 0
__a : List[str] = writer_class(
features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__a , __a : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__a : Optional[Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Union[str, Any] = pa.Table.from_batches([batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
if writer._num_bytes > 0:
__a , __a : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ):
__a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) )
shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = (
self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
self._validate_cache_dir()
__a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = not is_remote_filesystem(self._fs )
__a : Optional[Any] = os.path.join if is_local else posixpath.join
__a : Any = '-TTTTT-SSSSS-of-NNNNN'
__a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ )
__a : Any = 0
__a : Dict = 0
__a : int = 0
__a : List[str] = []
__a : Optional[int] = []
for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __a , __a , __a , __a : Optional[int] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ )
__a : List[str] = total_num_examples
__a : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__a : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__a : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ):
rename(
SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
__a : Union[str, Any] = []
__a : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__a , __a : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect()
else:
# don't use any pattern
__a : List[Any] = 0
__a : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
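# Usage sketch (illustrative; `df` is a hypothetical pyspark DataFrame): the
# builder above is what backs `datasets.Dataset.from_spark`, e.g.
#   from datasets import Dataset
#   ds = Dataset.from_spark(df)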
| 47 | 1 |
def UpperCAmelCase__ ( lowerCamelCase_ : int = 1_0_0_0_0_0_0 ):
__a : Optional[Any] = 1
__a : Tuple = 1
__a : Dict = {1: 1}
for inputa in range(2 , lowerCamelCase_ ):
__a : str = 0
__a : Optional[int] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__a : Union[str, Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
__a : Union[str, Any] = counter
if counter > pre_counter:
__a : Dict = inputa
__a : Optional[int] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
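# Worked check (illustrative, not part of the dataset row): 13 reaches 1 in
# nine halve/3n+1 steps (13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1),
# i.e. a ten-term chain in the memo's term-counting convention above.
steps, number = 0, 13
while number != 1:
    number = number // 2 if number % 2 == 0 else 3 * number + 1
    steps += 1
assert steps == 9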
| 47 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
# save results
if os.path.exists(lowerCamelCase_ ):
if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , 'config.json' ) ):
os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) )
if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) )
else:
os.makedirs(lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ):
__a : Dict = 2
if unlogit:
__a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ )
__a : Any = p * torch.log(lowerCamelCase_ )
__a : Union[str, Any] = 0
return -plogp.sum(dim=-1 )
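# Quick numeric check (illustrative, module-level sketch): the entropy of a
# uniform two-way attention distribution [0.5, 0.5] is log 2 ~= 0.693.
_p = torch.tensor([[0.5, 0.5]])
_ent = -(_p * torch.log(_p)).sum(dim=-1)
assert torch.isclose(_ent, torch.log(torch.tensor(2.0))).all()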
def UpperCAmelCase__ ( lowerCamelCase_ : Any ):
logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) )
for row in range(len(lowerCamelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ):
__a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
__a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
__a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
if head_mask is None:
__a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCamelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__a : Any = None
__a : Optional[int] = 0.0
__a : Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__a : Dict = tuple(t.to(args.device ) for t in inputs )
((__a) , ) : Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__a , __a , __a : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCamelCase_ ):
__a : List[str] = entropy(attn.detach() , lowerCamelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__a : Optional[Any] = 2
__a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCamelCase_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCamelCase_ )
logger.info('Head ranked by importance scores' )
__a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__a : str = torch.arange(
head_importance.numel() , device=args.device )
__a : Tuple = head_ranks.view_as(lowerCamelCase_ )
print_ad_tensor(lowerCamelCase_ )
return attn_entropy, head_importance, total_loss
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ):
__a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ )
__a : Tuple = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold )
__a : Tuple = torch.ones_like(lowerCamelCase_ )
__a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__a : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
__a : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__a : List[str] = float('Inf' )
__a : List[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCamelCase_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__a : Any = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__a : int = new_head_mask.view(-1 )
__a : Tuple = 0.0
__a : int = new_head_mask.view_as(lowerCamelCase_ )
__a : Optional[int] = new_head_mask.clone().detach()
print_ad_tensor(lowerCamelCase_ )
# Compute metric and head importance again
__a , __a , __a : int = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCamelCase_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
__a : List[Any] = datetime.now()
__a , __a , __a : List[str] = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[str] = 1 / loss
__a : List[Any] = datetime.now() - before_time
__a : List[str] = sum(p.numel() for p in model.parameters() )
__a : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__a : Tuple = [
v,
]
assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCamelCase_ )
__a : Optional[Any] = sum(p.numel() for p in model.parameters() )
__a : Tuple = datetime.now()
__a , __a , __a : Tuple = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , )
__a : Optional[Any] = 1 / loss
__a : List[Any] = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(lowerCamelCase_ , args.output_dir )
def UpperCAmelCase__ ( ):
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='Masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 )
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether to avoid using CUDA even when it is available' )
parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
__a : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__a : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__a : Union[str, Any] = torch.device('cuda' , args.local_rank )
__a : Any = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__a : List[Any] = nn.parallel.DistributedDataParallel(
lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ )
elif args.n_gpu > 1:
__a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCamelCase_ )
# Prepare dataset
__a : Tuple = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__a : str = (torch.from_numpy(lowerCamelCase_ ),)
__a : List[str] = TensorDataset(*lowerCamelCase_ )
__a : Optional[Any] = RandomSampler(lowerCamelCase_ )
__a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 47 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
SCREAMING_SNAKE_CASE__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : str = {}
with open(lowerCamelCase_ , 'r' ) as file:
for line_number, line in enumerate(lowerCamelCase_ ):
__a : Union[str, Any] = line.strip()
if line:
__a : List[Any] = line.split()
__a : Union[str, Any] = line_number
__a : Union[str, Any] = words[0]
__a : Optional[int] = value
return result
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
for attribute in key.split('.' ):
__a : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ )
__a : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
__a : str = PARAM_MAPPING[full_name.split('.' )[-1]]
__a : List[str] = 'param'
if weight_type is not None and weight_type != "param":
__a : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
elif weight_type is not None and weight_type == "param":
__a : List[Any] = hf_pointer
for attribute in hf_param_name.split('.' ):
__a : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ )
__a : Tuple = shape_pointer.shape
# let's reduce dimension
__a : List[Any] = value[0]
else:
__a : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__a : Union[str, Any] = value
elif weight_type == "weight_g":
__a : Optional[Any] = value
elif weight_type == "weight_v":
__a : Any = value
elif weight_type == "bias":
__a : List[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__a : Any = getattr(lowerCamelCase_ , lowerCamelCase_ )
__a : Union[str, Any] = value
else:
__a : Dict = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ):
__a : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
__a : Optional[int] = PARAM_MAPPING[full_name.split('.' )[-1]]
__a : Optional[int] = 'param'
if weight_type is not None and weight_type != "param":
__a : Dict = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__a : List[str] = '.'.join([key, hf_param_name] )
else:
__a : Optional[int] = key
__a : Tuple = value if 'lm_head' in full_key else value[0]
SCREAMING_SNAKE_CASE__ = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
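# PARAM_MAPPING translates the trailing component of a fairseq adapter
# parameter name (W_a, b_a, ln_W, ...) into the matching HF submodule
# parameter; when a full name ends with one of these keys, the loading helpers
# above tag it with the special 'param' weight type.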
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : str=None ):
__a : str = False
for key, mapped_key in MAPPING.items():
__a : int = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__a : str = True
if "*" in mapped_key:
__a : List[Any] = name.split(lowerCamelCase_ )[0].split('.' )[-2]
__a : List[Any] = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
__a : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__a : str = 'weight_v'
elif "bias" in name:
__a : Any = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : Optional[int] = 'weight'
else:
__a : Tuple = None
if hf_dict is not None:
rename_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return is_used
return is_used
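# Note: the two returns above are not a duplicate. In the upstream conversion
# script the first one sits inside the matching branch of the loop and exits
# early once a mapping key matches; the trailing one reports `is_used` for
# weights that matched no key.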
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : str ):
__a : Tuple = []
__a : Dict = fairseq_model.state_dict()
__a : List[str] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__a : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
__a : int = True
else:
__a : Tuple = load_wavaveca_layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
__a : List[Any] = full_name.split('conv_layers.' )[-1]
__a : List[str] = name.split('.' )
__a : int = int(items[0] )
__a : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__a : str = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__a : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__a : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__a : Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
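# In the conv-layer loader above, type_id 0 selects the convolution's own
# weight/bias, while type_id 2 selects its layer/group normalization (layer 0
# only when group norm is used); anything else is collected in unused_weights.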
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=False ):
if config_path is not None:
__a : Union[str, Any] = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
else:
__a : Tuple = WavaVecaConfig()
if is_seq_class:
__a : int = read_txt_into_dict(lowerCamelCase_ )
__a : Any = idalabel
__a : Optional[Any] = WavaVecaForSequenceClassification(lowerCamelCase_ )
__a : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
feature_extractor.save_pretrained(lowerCamelCase_ )
elif is_finetuned:
if dict_path:
__a : List[str] = Dictionary.load(lowerCamelCase_ )
# important: change the bos & pad token ids, since the CTC blank symbol is
# <pad> and not <s> as in fairseq
__a : List[str] = target_dict.pad_index
__a : Any = target_dict.bos_index
__a : Dict = target_dict.eos_index
__a : List[str] = len(target_dict.symbols )
__a : Optional[Any] = os.path.join(lowerCamelCase_ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_ ) )
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Optional[Any] = 0
__a : Union[str, Any] = 1
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
__a : str = WavaVecaCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
__a : Any = True if config.feat_extract_norm == 'layer' else False
__a : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
__a : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
__a : List[Any] = WavaVecaForCTC(lowerCamelCase_ )
else:
__a : List[str] = WavaVecaForPreTraining(lowerCamelCase_ )
if is_finetuned or is_seq_class:
__a , __a , __a : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__a : int = argparse.Namespace(task='audio_pretraining' )
__a : Optional[int] = fairseq.tasks.setup_task(lowerCamelCase_ )
__a , __a , __a : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase_ )
__a : str = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 47 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
__a : List[Any] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__a : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__a : List[str] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__a : int = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__a : Optional[Any] = os.path.join(get_home_dir() , 'models' )
__a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__a : Any = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
__a : Dict = original_bort._collect_params_with_prefix()
# Build our config 🤗
__a : Optional[Any] = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCamelCase_ ),
}
__a : str = BertConfig.from_dict(lowerCamelCase_ )
__a : Optional[int] = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
__a : Optional[int] = hf_param.shape
__a : int = to_torch(params[gluon_param] )
__a : int = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__a : BertSelfAttention = layer.attention.self
__a : Optional[int] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__a : str = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__a : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__a : str = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__a : Dict = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__a : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__a : BertSelfOutput = layer.attention.output
__a : Tuple = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
__a : Dict = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__a : BertIntermediate = layer.intermediate
__a : List[str] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__a : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__a : BertOutput = layer.output
__a : str = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__a : List[Any] = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__a : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__a : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' )
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids']
# Get gluon output
__a : Optional[int] = mx.nd.array([input_ids] )
__a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' )
__a : int = hf_bort_model(**lowerCamelCase_ )[0]
__a : Dict = output_gluon[0].asnumpy()
__a : str = output_hf[0].detach().numpy()
__a : List[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
print('✔️ Both models output the same tensors' )
else:
print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCAmelCase__ ( lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ) ) )
def UpperCAmelCase__ ( lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray ):
if dataset.ndim != value_array.ndim:
__a : Tuple = (
'Wrong input data\'s dimensions... '
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowerCamelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
__a : Dict = (
'Wrong input data\'s shape... '
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowerCamelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
__a : Union[str, Any] = (
'Input data have different datatype... '
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowerCamelCase_ )
__a : List[Any] = []
for value in value_array:
__a : Any = euclidean(lowerCamelCase_ , dataset[0] )
__a : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
__a : List[str] = euclidean(lowerCamelCase_ , lowerCamelCase_ )
if dist > temp_dist:
__a : Any = temp_dist
__a : Optional[int] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCAmelCase__ ( lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray ):
return np.dot(lowerCamelCase_ , lowerCamelCase_ ) / (norm(lowerCamelCase_ ) * norm(lowerCamelCase_ ))
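# Cosine similarity of two vectors: dot(a, b) / (||a|| * ||b||), in [-1, 1].
# Illustrative example (made-up values): for a = [1, 0] and b = [0, 1] it is
# 0.0 (orthogonal); for a = [1, 0] and b = [2, 0] it is 1.0 (parallel).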
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
__a : Any = ''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ):
return data[1:] + data[0]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
__a : Optional[int] = ''
for i in range(len(lowerCamelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
__a : List[str] = int('0b' + data[0] + data[-1] , 2 )
__a : List[str] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
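# Helper semantics for the S-DES routines above: apply_table permutes/selects
# bits of `inp` using a 1-indexed position table; left_shift rotates a bit
# string left by one position; xor combines two equal-length bit strings
# bitwise; apply_sbox uses the outer bits (first and last) as the row and the
# middle two bits as the column of a 4x4 S-box and returns the entry in binary.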
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
__a : List[Any] = message[:4]
__a : str = message[4:]
__a : Any = apply_table(lowerCamelCase_ , lowerCamelCase_ )
__a : int = xor(lowerCamelCase_ , lowerCamelCase_ )
__a : Dict = apply_sbox(lowerCamelCase_ , temp[:4] ) # noqa: E741
__a : Tuple = apply_sbox(lowerCamelCase_ , temp[4:] )
__a : List[Any] = '0' * (2 - len(lowerCamelCase_ )) + l # noqa: E741
__a : List[str] = '0' * (2 - len(lowerCamelCase_ )) + r
__a : List[Any] = apply_table(l + r , lowerCamelCase_ )
__a : Dict = xor(lowerCamelCase_ , lowerCamelCase_ )
return temp + right
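# One Feistel-style round of simplified DES: the right half is expanded with
# the expansion table, XORed with the round key, fed through the two S-boxes,
# run through the 4-bit permutation table, and finally XORed into the left
# half; the new left half is returned concatenated with the unchanged right.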
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input('''Enter 10 bit key: ''')
SCREAMING_SNAKE_CASE__ = input('''Enter 8 bit message: ''')
SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 10, 9]
SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1]
SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7]
SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6]
SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1]
SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table)
SCREAMING_SNAKE_CASE__ = temp[:5]
SCREAMING_SNAKE_CASE__ = temp[5:]
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
# encryption
SCREAMING_SNAKE_CASE__ = apply_table(message, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
SCREAMING_SNAKE_CASE__ = apply_table(CT, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 47 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''note_seq''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['note_seq'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(cls , ['note_seq'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['note_seq'] )
| 47 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
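# The assertions above verify that gradients are summed elementwise across
# calls ([1, 2] + [-2, 1] + [-1, 2] == [-2, 5]), that `step` counts
# accumulation calls, that a mismatched number of gradients raises, and that
# reset() zeroes both the step counter and the stored gradients.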
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : int = None
ops.enable_eager_execution_internal()
__a : Optional[Any] = tf.config.list_physical_devices('CPU' )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a : int = tf.config.list_logical_devices(device_type='CPU' )
__a : str = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a : List[str] = GradientAccumulator()
__a : Tuple = tf.Variable([4.0, 3.0] )
__a , __a : int = create_optimizer(5e-5 , 1_0 , 5 )
__a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ )
def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
with strategy.scope():
__a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ )
local_variables[0].assign(SCREAMING_SNAKE_CASE__ )
local_variables[1].assign(SCREAMING_SNAKE_CASE__ )
strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE__ )
def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
__a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 47 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
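# This property (inputs_to_logits_ratio upstream) is the product of all
# convolutional strides, i.e. how many raw audio samples collapse into a
# single encoder frame. With the default strides above,
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), that is 5 * 2**6 = 320.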
| 47 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = vocab_size
__a : Tuple = hidden_size
__a : List[str] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : str = hidden_act
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[Any] = max_position_embeddings
__a : Dict = type_vocab_size
__a : str = initializer_range
__a : List[str] = layer_norm_eps
__a : Optional[int] = position_embedding_type
__a : Union[str, Any] = use_cache
__a : str = classifier_dropout
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
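# For the default task this yields
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})]),
# i.e. both ONNX inputs are dynamic along the batch and sequence axes.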
| 47 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
SCREAMING_SNAKE_CASE__ = 6_3_7_8_1_3_7.0
SCREAMING_SNAKE_CASE__ = 6_3_5_6_7_5_2.3_1_4_2_4_5
SCREAMING_SNAKE_CASE__ = 637_8137
def UpperCAmelCase__ ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ):
__a : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__a : Tuple = atan((1 - flattening) * tan(radians(lowerCamelCase_ ) ) )
__a : Any = atan((1 - flattening) * tan(radians(lowerCamelCase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__a : Optional[int] = haversine_distance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__a : Tuple = (b_lata + b_lata) / 2
__a : str = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__a : Optional[Any] = (sin(lowerCamelCase_ ) ** 2) * (cos(lowerCamelCase_ ) ** 2)
__a : int = cos(sigma / 2 ) ** 2
__a : Any = (sigma - sin(lowerCamelCase_ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__a : List[Any] = (cos(lowerCamelCase_ ) ** 2) * (sin(lowerCamelCase_ ) ** 2)
__a : int = sin(sigma / 2 ) ** 2
__a : List[Any] = (sigma + sin(lowerCamelCase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
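# Putting the pieces together, Lambert's correction to the spherical
# (haversine) arc is: distance = R_equatorial * (sigma - (f / 2) * (X + Y)).
# Since EQUATORIAL_RADIUS is given in meters, the result is in meters too.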
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
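# Worked example of the offset: ',' has id 3 in the spm model; adding
# fairseq_offset (1) gives fairseq id 4, matching the alignment table above.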
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of using Accelerate
# to perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 32
def UpperCAmelCase__ ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : int = 1_6 ):
__a : List[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
__a : int = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCamelCase_ : int ):
# max_length=None => use the model max length (it's actually the default)
__a : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a : str = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels', which is the name the models
# of the transformers library expect for the labels
__a : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase_ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a : Tuple = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a : Any = 1_6
elif accelerator.mixed_precision != "no":
__a : Optional[Any] = 8
else:
__a : str = None
return tokenizer.pad(
lowerCamelCase_ , padding='longest' , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors='pt' , )
# Instantiate dataloaders.
__a : Union[str, Any] = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
__a : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
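# Illustrative sketch (not part of the original script): `pad_to_multiple_of`
# rounds each batch's padded length up to the nearest multiple, which keeps tensor
# shapes friendly to fp16/fp8 tensor cores. The rounding rule itself is just:
def _rounded_pad_length(longest : int , multiple : int ):
    return ((longest + multiple - 1) // multiple) * multiple


assert _rounded_pad_length(1_1_7 , 8 ) == 1_2_0
assert _rounded_pad_length(1_2_0 , 8 ) == 1_2_0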
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
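# A minimal sketch (illustration only) of the bookkeeping `accelerator.accumulate`
# performs under the hood: the loss from each micro-batch is scaled, gradients are
# summed via repeated `backward()`, and the optimizer only steps every
# `accumulation_steps` batches, so the effective gradient is an average.
def _manual_accumulation_sketch(model , optimizer , dataloader , accumulation_steps=4 ):
    model.train()
    for step, batch in enumerate(dataloader ):
        loss = model(**batch ).loss / accumulation_steps
        loss.backward()  # grads accumulate in .grad until zero_grad()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()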
def main( ):
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to run before the accumulated gradients are applied.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 47 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict( checkpoint_path : str ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # The old BERT code stored no `decoder.bias`; it lives separately as `cls.predictions.bias`, so reuse it here
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
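# A toy example of the renaming pass above (the key names are made up):
def _toy_rename_example():
    d = OrderedDict([('bert.bert.embeddings.word_embeddings.weight', 0), ('detector.backbone.weight', 1)] )
    new_d = OrderedDict()
    for key in d:
        if "detector" in key:
            continue  # detector weights are dropped, exactly as in get_new_dict
        new_key = key
        for old, new in [('bert.bert', 'visual_bert')]:
            new_key = new_key.replace(old , new )
        new_d[new_key] = d[key]
    return list(new_d )  # ['visual_bert.embeddings.word_embeddings.weight']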
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1_0_2_4}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_1_2}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1_0_2_4,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = '''openai-gpt'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=4_0_4_7_8 , SCREAMING_SNAKE_CASE__ : Dict=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : List[str]=1_2 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]="cls_index" , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=0.1 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
'''simple docstring'''
__a : Union[str, Any] = vocab_size
__a : List[str] = n_positions
__a : Union[str, Any] = n_embd
__a : List[str] = n_layer
__a : Optional[Any] = n_head
__a : Any = afn
__a : Union[str, Any] = resid_pdrop
__a : str = embd_pdrop
__a : str = attn_pdrop
__a : Union[str, Any] = layer_norm_epsilon
__a : Optional[int] = initializer_range
__a : Optional[int] = summary_type
__a : str = summary_use_proj
__a : Any = summary_activation
__a : List[str] = summary_first_dropout
__a : List[str] = summary_proj_to_labels
super().__init__(**SCREAMING_SNAKE_CASE__ )
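# Hedged usage sketch: upstream this class is `transformers.OpenAIGPTConfig`, and
# `attribute_map` lets generic code read `hidden_size` while the config stores `n_embd`:
# from transformers import OpenAIGPTConfig
# cfg = OpenAIGPTConfig(n_embd=256, n_layer=4, n_head=4)
# cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads  # -> 256, 4, 4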
| 47 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
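# How the one-liner above works: `%r` substitutes the repr() of the argument and
# `%%` escapes a literal percent sign, so formatting the template string with
# itself reproduces the program's own text. The same idea without the lambda:
# _template = 'print((lambda quine: quine %% quine)(%r))'
# print(_template % _template)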
| 47 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps : jnp.ndarray , embedding_dim : int , freq_shift : float = 1 , min_timescale : float = 1 , max_timescale : float = 1.0e4 , flip_sin_to_cos : bool = False , scale : float = 1.0 , ):
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(signal )[0], embedding_dim] )
    return signal
class _UpperCamelCase( nn.Module ):
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(SCREAMING_SNAKE_CASE__ )
__a : str = nn.silu(SCREAMING_SNAKE_CASE__ )
__a : Tuple = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(SCREAMING_SNAKE_CASE__ )
return temb
class _UpperCamelCase( nn.Module ):
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : float = 1
@nn.compact
def __call__( self : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return get_sinusoidal_embeddings(
SCREAMING_SNAKE_CASE__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
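# Usage sketch for the helper above (shape check only):
if __name__ == "__main__":
    timesteps = jnp.arange(4 )
    emb = get_sinusoidal_embeddings(timesteps , embedding_dim=3_2 )
    print(emb.shape )  # (4, 32): sin and cos halves concatenated on the feature axis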
| 47 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = tempfile.mkdtemp()
__a : int = 8
# DPR tok
__a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : List[str] = {'unk_token': '<unk>'}
__a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' )
__a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__a : List[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__a : Union[str, Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 47 | 1 |
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector( end_pointa : Pointad , end_pointb : Pointad ):
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross( ab : Vectorad , ac : Vectorad ):
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector( vector : Vectorad , accuracy : int ):
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def UpperCAmelCase__ ( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 1_0 ):
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
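# Spot-check for the collinearity test above (the public check is the last
# function, kept under the file's `UpperCAmelCase__` naming convention):
assert UpperCAmelCase__((0.0, 0.0, 0.0) , (1.0, 1.0, 1.0) , (2.0, 2.0, 2.0) )  # all on one line
assert not UpperCAmelCase__((0.0, 0.0, 0.0) , (1.0, 0.0, 0.0) , (0.0, 1.0, 0.0) )  # a real triangle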
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : int = vocab_file
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Optional[Any] = []
__a : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
    def __lowerCAmelCase ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
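# A sketch of the decoding rule in `convert_tokens_to_string` above: special tokens
# split the sentencepiece stream, so they are emitted verbatim and never re-decoded.
def _decode_sketch(tokens , special_tokens , sp_decode ):
    out_string , current = '' , []
    for token in tokens:
        if token in special_tokens:
            out_string += sp_decode(current ) + token
            current = []
        else:
            current.append(token )
    return (out_string + sp_decode(current )).strip()


# e.g. _decode_sketch(['▁he', 'llo', '<::::>'], {'<::::>'}, lambda ts: ''.join(ts).replace('▁', ' '))
# -> 'hello<::::>'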
| 47 | 1 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
SCREAMING_SNAKE_CASE__ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: the substrings matched by these
        regexes are removed from the input data before the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
SCREAMING_SNAKE_CASE__ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase( datasets.Metric ):
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__a : Optional[int] = np.array([re.sub(SCREAMING_SNAKE_CASE__ , '' , SCREAMING_SNAKE_CASE__ ) for x in predictions] )
__a : Any = np.array([re.sub(SCREAMING_SNAKE_CASE__ , '' , SCREAMING_SNAKE_CASE__ ) for x in references] )
else:
__a : List[Any] = np.asarray(SCREAMING_SNAKE_CASE__ )
__a : int = np.asarray(SCREAMING_SNAKE_CASE__ )
if ignore_case:
__a : Optional[int] = np.char.lower(SCREAMING_SNAKE_CASE__ )
__a : str = np.char.lower(SCREAMING_SNAKE_CASE__ )
if ignore_punctuation:
__a : Tuple = string.punctuation.maketrans('' , '' , string.punctuation )
__a : Optional[Any] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ )
__a : Any = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ )
if ignore_numbers:
__a : Tuple = string.digits.maketrans('' , '' , string.digits )
__a : str = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ )
__a : Tuple = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = predictions == references
return {"exact_match": np.mean(SCREAMING_SNAKE_CASE__ ) * 1_0_0}
| 47 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
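# Why these dummies exist: `from diffusers import X` must succeed even when optional
# backends are missing, and only *using* X should fail. A minimal sketch of the
# availability check behind `requires_backends` (the real one uses cached flags):
import importlib.util


def _requires_backends_sketch(name , backends ):
    missing = [b for b in backends if importlib.util.find_spec(b ) is None]
    if missing:
        raise ImportError(f'''{name} requires the missing backends: {missing}''' )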
| 47 | 1 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
__a : Dict = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__a : int = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Any = 'TransientGlobalSelfAttention'
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
__a : List[Any] = f'''layers_{str(lowerCamelCase_ )}'''
# Self-Attention
__a : str = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
__a : List[str] = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
__a : int = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
__a : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : str = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
__a : Union[str, Any] = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
__a : str = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
__a : List[str] = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__a : int = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
__a : int = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__a : Optional[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__a : int = flax_model.params['encoder']['block'][str(lowerCamelCase_ )]['layer']
__a : str = tax_attention_key
__a : Dict = tax_attention_out
__a : Union[str, Any] = tax_attention_query
__a : List[str] = tax_attention_value
__a : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Any = tax_global_layer_norm
if split_mlp_wi:
__a : Any = tax_mlp_wi_a
__a : Optional[int] = tax_mlp_wi_a
else:
__a : Union[str, Any] = tax_mlp_wi
__a : List[Any] = tax_mlp_wo
__a : List[Any] = tax_mlp_layer_norm
__a : Optional[Any] = flax_model_encoder_layer_block
# Only for layer 0:
__a : Dict = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
__a : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__a : Union[str, Any] = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
__a : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
__a : List[str] = tax_model['target']['encoder']['encoder_norm']['scale']
__a : Tuple = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__a : Dict = f'''layers_{str(lowerCamelCase_ )}'''
# Self-Attention
__a : Any = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
__a : Any = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
__a : List[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
__a : Optional[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
__a : int = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
__a : str = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
__a : Union[str, Any] = tax_enc_dec_attention_module['key']['kernel']
__a : List[Any] = tax_enc_dec_attention_module['out']['kernel']
__a : Optional[int] = tax_enc_dec_attention_module['query']['kernel']
__a : List[Any] = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
__a : int = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
__a : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
__a : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__a : Dict = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
__a : int = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__a : List[str] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__a : Union[str, Any] = flax_model.params['decoder']['block'][str(lowerCamelCase_ )]['layer']
__a : List[str] = tax_attention_key
__a : Any = tax_attention_out
__a : List[Any] = tax_attention_query
__a : Optional[Any] = tax_attention_value
__a : Any = tax_pre_attention_layer_norm
__a : int = tax_enc_dec_attention_key
__a : Optional[Any] = tax_enc_dec_attention_out
__a : List[str] = tax_enc_dec_attention_query
__a : str = tax_enc_dec_attention_value
__a : Dict = tax_cross_layer_norm
if split_mlp_wi:
__a : Union[str, Any] = tax_mlp_wi_a
__a : int = tax_mlp_wi_a
else:
__a : int = tax_mlp_wi
__a : Dict = tax_mlp_wo
__a : Union[str, Any] = txa_mlp_layer_norm
__a : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
__a : Tuple = tax_model['target']['decoder']['decoder_norm']['scale']
__a : Optional[Any] = txa_decoder_norm
# Only for layer 0:
__a : Optional[int] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
__a : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
__a : Tuple = tax_model['target']['token_embedder']['embedding']
__a : List[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__a : str = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
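# A sketch of the core idea in the loops above: a T5X checkpoint is a nested dict
# of arrays, and conversion is a path-to-path copy between two naming schemes.
def _copy_param_sketch(src , dst , src_path , dst_path ):
    node = src
    for key in src_path[:-1]:
        node = node[key]
    target = dst
    for key in dst_path[:-1]:
        target = target.setdefault(key , {} )
    target[dst_path[-1]] = node[src_path[-1]]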
| 47 |
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ):
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
SCREAMING_SNAKE_CASE__ = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
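# Hedged usage sketch: upstream this class is `transformers.InformerConfig`.
# `prediction_length` is the only required argument; the ProbSparse settings shown
# here mirror the defaults above:
# from transformers import InformerConfig
# cfg = InformerConfig(prediction_length=24, attention_type='prob', sampling_factor=5)
# cfg.context_length  # -> 24, falls back to prediction_length when unset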
| 47 | 1 |
import os
from pathlib import Path
def UpperCAmelCase__ ( ):
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
        ]
    ]

    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
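# Hedged usage sketch: the loader JIT-compiles the CUDA/C++ sources on first call,
# so the heavy build is deferred until the kernels are actually wanted:
# MSDA = UpperCAmelCase__()  # name kept from the def above; upstream it is `load_cuda_kernels`
# MSDA.ms_deform_attn_forward(...)  # hypothetical call; see the deformable DETR model for real usage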
| 47 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : List[Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : List[str] = 1_0, 0.0
__a : Dict = self.dummy_model()
__a : str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
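# The `_get_variance` checks above follow the DDIM posterior-variance formula.
# A standalone sketch of that computation (hypothetical helper; assumes a plain
# linear beta schedule, consistent with the 0.02 value asserted for (999, 998)):
import torch

def ddim_variance(alphas_cumprod: torch.Tensor, t: int, prev_t: int) -> torch.Tensor:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # sigma_t^2 = (beta_prev / beta_t) * (1 - alpha_t / alpha_prev)
    return (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
assert abs(ddim_variance(alphas_cumprod, 999, 998).item() - 0.02) < 1e-4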
| 47 | 1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( ):
__a : Union[str, Any] = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=lowerCamelCase_ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=lowerCamelCase_ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=lowerCamelCase_ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=lowerCamelCase_ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=lowerCamelCase_ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=lowerCamelCase_ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=lowerCamelCase_ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
__a : int = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCamelCase_ : Dict ):
def fn(lowerCamelCase_ : int ):
return tokenizer(examples['text'] )
return fn
def UpperCAmelCase__ ( lowerCamelCase_ : Dict ):
__a : Dict = []
for i in range(len(tokenized_data['input_ids'] ) ):
__a : Optional[int] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
__a : Optional[Any] = tf.train.Features(feature=lowerCamelCase_ )
__a : Dict = tf.train.Example(features=lowerCamelCase_ )
__a : Any = example.SerializeToString()
records.append(lowerCamelCase_ )
return records
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : Optional[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__a : Optional[int] = min(len(lowerCamelCase_ ) , args.limit )
__a : List[str] = dataset.select(range(lowerCamelCase_ ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
__a : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__a : Optional[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
else:
__a : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__a : Dict = tokenize_function(lowerCamelCase_ )
__a : List[str] = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase_ : Tuple ):
# Concatenate all texts.
__a : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
__a : Dict = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__a : int = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__a : int = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase_ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__a : Union[str, Any] = dataset_tokenized.map(lowerCamelCase_ , batched=lowerCamelCase_ , batch_size=1_0_0_0 , num_proc=4 )
__a : Union[str, Any] = 0
__a : Union[str, Any] = 0
for shard in range(0 , len(lowerCamelCase_ ) , args.shard_size ):
__a : List[Any] = grouped_dataset[shard : shard + args.shard_size]
__a : Any = len(dataset_snapshot['input_ids'] )
__a : str = os.path.join(lowerCamelCase_ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
__a : int = get_serialized_examples(lowerCamelCase_ )
with tf.io.TFRecordWriter(lowerCamelCase_ ) as out_file:
for i in range(len(lowerCamelCase_ ) ):
__a : Dict = serialized_examples[i]
out_file.write(lowerCamelCase_ )
print('Wrote file {} containing {} records'.format(lowerCamelCase_ , lowerCamelCase_ ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = parse_args()
main(args)
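# Minimal sketch of the fixed-length chunking that `group_texts` above applies,
# shown on plain Python lists (hypothetical toy data, max_length of 4):
examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
concatenated = {k: sum(v, []) for k, v in examples.items()}
max_length = 4
total_length = (len(concatenated["input_ids"]) // max_length) * max_length
chunks = {
    k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
    for k, t in concatenated.items()
}
assert chunks["input_ids"] == [[1, 2, 3, 4], [5, 6, 7, 8]]  # remainder [9, 10] dropped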
| 47 |
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ):
# Check if the input is valid
if not len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
__a , __a , __a : Optional[Any] = equationa
__a , __a , __a : Optional[int] = equationa
# Calculate the determinants of the matrices
__a : str = aa * ba - aa * ba
__a : Tuple = ca * ba - ca * ba
__a : Union[str, Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
__a : Any = determinant_x / determinant
__a : Optional[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
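# A readable reference sketch of the same 2x2 Cramer's-rule solver, with the
# two input equations kept under distinct names (hypothetical helper, shown
# because the obfuscated parameter names above collide):
def solve_2x2(eq1: list[float], eq2: list[float]) -> tuple[float, float]:
    a1, b1, c1 = eq1  # a1*x + b1*y = c1
    a2, b2, c2 = eq2  # a2*x + b2*y = c2
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    if determinant == 0:
        raise ValueError("No unique solution (lines are parallel or coincident).")
    return (determinant_x / determinant, determinant_y / determinant)

assert solve_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)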
| 47 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
def UpperCAmelCase__ ( lowerCamelCase_ : Path , lowerCamelCase_ : list ):
__a : Tuple = '\n'.join(lowerCamelCase_ )
Path(lowerCamelCase_ ).open('w' ).writelines(lowerCamelCase_ )
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-mbart'''
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Dict = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a : str = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a : Optional[Any] = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
__a : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a : Union[str, Any] = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : str = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a : Any = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a : Dict = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__a : Dict = Path(self.get_auto_remove_tmp_dir() )
__a : Tuple = str(tmp_dir / 'scores.json' )
__a : List[str] = str(tmp_dir / 'val.target' )
_dump_articles(SCREAMING_SNAKE_CASE__ , text['en'] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text['de'] )
__a : Dict = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a : Any = f'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
__a : List[str] = [' num_beams | length_penalty', model, 'Best score args']
__a : Optional[Any] = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
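# The tests above drive CLI entry points by patching `sys.argv`; a minimal
# self-contained sketch of that pattern (hypothetical `main` for illustration):
import sys
from unittest.mock import patch

def main() -> str:
    return sys.argv[1]

with patch.object(sys, "argv", ["prog", "--flag"]):
    assert main() == "--flag"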
| 47 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
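# The three `__init__.py` snippets above share one pattern: probe each optional
# backend, fall back when it is missing, and defer the real imports to a lazy
# module. A compact sketch of the guard with a stand-in availability check
# (hypothetical names `is_backend_available` / `BackendOnlyClass`):
class OptionalDependencyNotAvailable(Exception):
    pass

def is_backend_available() -> bool:
    return False  # pretend the optional dependency is missing

try:
    if not is_backend_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    BACKEND_EXPORTS = []  # the real libraries substitute dummy objects here
else:
    BACKEND_EXPORTS = ["BackendOnlyClass"]

assert BACKEND_EXPORTS == []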
| 47 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
def UpperCAmelCase__ ( lowerCamelCase_ : type , lowerCamelCase_ : Optional[str] , lowerCamelCase_ : Optional[List[str]] = None , ):
__a : str = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__a : Tuple = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__a : Dict = format_type
def UpperCAmelCase__ ( lowerCamelCase_ : Exception , lowerCamelCase_ : Optional[str] , lowerCamelCase_ : Optional[List[str]] = None ):
__a : List[Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__a : int = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[str] , **lowerCamelCase_ : Union[str, Any] ):
__a : Optional[int] = get_format_type_from_alias(lowerCamelCase_ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowerCamelCase_ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
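# A tiny standalone mirror of the alias-resolving registry above (hypothetical
# `DummyFormatter`; the real module wires Python/NumPy/Pandas/Arrow formatters):
_TYPES: dict = {}
_ALIASES: dict = {}

def register(formatter_cls, format_type, aliases=()):
    _TYPES[format_type] = formatter_cls
    for alias in set(list(aliases) + [format_type]):
        _ALIASES[alias] = format_type

def get_formatter(name, **kwargs):
    canonical = _ALIASES.get(name, name)
    if canonical in _TYPES:
        return _TYPES[canonical](**kwargs)
    raise ValueError(f"Unknown format type: {name!r}")

class DummyFormatter:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

register(DummyFormatter, "dummy", aliases=["d"])
assert isinstance(get_formatter("d"), DummyFormatter)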
| 47 |
from string import ascii_lowercase, ascii_uppercase
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
    if not lowerCamelCase_:
        return ""
    __a : Union[str, Any] = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return __a.get(lowerCamelCase_[0] , lowerCamelCase_[0] ) + lowerCamelCase_[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
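# Usage check for the fixed capitalizer above (runs at import time):
assert UpperCAmelCase__('hello world') == 'Hello world'
assert UpperCAmelCase__('123') == '123'  # '1' has no uppercase mapping; `.get` falls back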
| 47 | 1 |
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
__a : int = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__a : Union[str, Any] = str(bin(lowerCamelCase_ ) )[2:]
__a : Dict = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
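# A readable reference sketch of the same string-based bitwise OR, with the two
# operands kept under distinct names (hypothetical helper, shown because the
# obfuscated parameter names above collide):
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary, b_binary = bin(a)[2:], bin(b)[2:]
    width = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in pair))
        for pair in zip(a_binary.zfill(width), b_binary.zfill(width))
    )

assert binary_or(25, 32) == "0b111001"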
| 47 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
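# The closing property above reduces `conv_stride` with multiplication: the
# product of the strides is the model's total temporal downsampling factor.
# Standalone check using the default stride tuple from the signature above:
import functools, operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, conv_stride, 1) == 320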
| 47 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
SCREAMING_SNAKE_CASE__ = TypeVar('''U''')
class _UpperCamelCase( Generic[T, U] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T | None , SCREAMING_SNAKE_CASE__ : U | None ):
'''simple docstring'''
__a : Optional[int] = key
__a : List[Any] = val
__a : DoubleLinkedListNode[T, U] | None = None
__a : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Any ):
'''simple docstring'''
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class _UpperCamelCase( Generic[T, U] ):
def __init__( self : Tuple ):
'''simple docstring'''
__a : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a , __a : Optional[int] = self.rear, self.head
def __repr__( self : List[str] ):
'''simple docstring'''
__a : Any = ['DoubleLinkedList']
__a : Optional[Any] = self.head
while node.next is not None:
rep.append(str(SCREAMING_SNAKE_CASE__ ) )
__a : List[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
__a : List[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__a : List[str] = node
__a : List[Any] = previous
__a : Dict = node
__a : int = self.rear
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__a : Optional[int] = node.next
__a : Optional[int] = node.prev
__a : List[Any] = None
__a : Union[str, Any] = None
return node
class _UpperCamelCase( Generic[T, U] ):
__SCREAMING_SNAKE_CASE : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : str , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : DoubleLinkedList[T, U] = DoubleLinkedList()
__a : List[Any] = capacity
__a : Optional[Any] = 0
__a : Tuple = 0
__a : str = 0
__a : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Dict ):
'''simple docstring'''
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : Tuple , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
return key in self.cache
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
__a : DoubleLinkedListNode[T, U] = self.cache[key]
__a : int = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(SCREAMING_SNAKE_CASE__ )
return node.val
self.miss += 1
return None
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__a : List[str] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(SCREAMING_SNAKE_CASE__ ) is not None
            ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__a : int = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__a : Union[str, Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__a : Union[str, Any] = value
self.list.add(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int = 1_2_8 ):
'''simple docstring'''
def cache_decorator_inner(SCREAMING_SNAKE_CASE__ : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE__ : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__a : Any = LRUCache(SCREAMING_SNAKE_CASE__ )
__a : Any = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__a : Tuple = func(*SCREAMING_SNAKE_CASE__ )
cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(SCREAMING_SNAKE_CASE__ , 'cache_info' , SCREAMING_SNAKE_CASE__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
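# Usage sketch for the class above. In the original LRU-cache recipe this class
# is `LRUCache` and the classmethod is `decorator`; with those names it
# memoizes a single-positional-argument function (illustrative only, since the
# obfuscated names above differ):
# @LRUCache.decorator(100)
# def fib(n: int) -> int:
#     return 1 if n < 2 else fib(n - 1) + fib(n - 2)
#
# fib(30)            # repeated subproblems are served from the cache
# fib.cache_info()   # CacheInfo(hits=..., misses=..., capacity=100, ...)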
| 47 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (position - 1) // 2
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 1
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 2
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[str] ):
'''simple docstring'''
__a : list[tuple[T, int]] = []
__a : dict[T, int] = {}
__a : int = 0
def __len__( self : Any ):
'''simple docstring'''
return self.elements
def __repr__( self : Any ):
'''simple docstring'''
return str(self.heap )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.elements == 0
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
__a : List[Any] = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a : Union[str, Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a : Dict = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
__a : str = (elem, weight)
if position > 0:
__a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : Dict = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
if curr_pos == 0:
return None
__a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : str = self.heap[curr_pos]
__a , __a : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : int = self.position_map[elem]
__a , __a : Optional[Any] = self.heap[curr_pos]
__a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a : str = self.heap[child_left_position]
__a , __a : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__a , __a : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__a , __a : Union[str, Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.heap[nodea_pos][0]
__a : str = self.heap[nodea_pos][0]
__a , __a : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a : str = nodea_pos
__a : Optional[int] = nodea_pos
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[Any] ):
'''simple docstring'''
__a : dict[T, dict[T, int]] = {}
__a : int = 0
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Dict ):
'''simple docstring'''
return self.nodes
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
if node not in self.connections:
__a : Tuple = {}
self.nodes += 1
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = weight
__a : Any = weight
def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ):
__a : dict[T, int] = {node: maxsize for node in graph.connections}
__a : dict[T, T | None] = {node: None for node in graph.connections}
__a : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase_ , lowerCamelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a : Optional[int] = priority_queue.extract_min()
__a : int = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Optional[int] = node
# running prim's algorithm
while not priority_queue.is_empty():
__a : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Dict = node
return dist, parent
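# A compact, self-contained sketch of the same minimum-spanning-tree idea using
# `heapq` (hypothetical helper, independent of the classes above):
import heapq

def prim_mst_weight(adj: dict, start) -> int:
    # adj maps node -> {neighbor: weight}, mirroring `connections` above
    seen = {start}
    total = 0
    heap = [(w, v) for v, w in adj[start].items()]
    heapq.heapify(heap)
    while heap and len(seen) < len(adj):
        w, v = heapq.heappop(heap)
        if v in seen:
            continue
        seen.add(v)
        total += w
        for u, wu in adj[v].items():
            if u not in seen:
                heapq.heappush(heap, (wu, u))
    return total

# triangle graph: the MST keeps the edges of weight 3 and 5, skipping 10
assert prim_mst_weight({"a": {"b": 3, "c": 5}, "b": {"a": 3, "c": 10}, "c": {"a": 5, "b": 10}}, "a") == 8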
| 47 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE__ = 5_0003
SCREAMING_SNAKE_CASE__ = 5_0002
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = PLBartTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = False
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : str = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='base' , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Tuple = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='base' , keep_accents=SCREAMING_SNAKE_CASE__ )
__a : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__a : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__a : List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
__a : Optional[Any] = tokenizer.vocab_size
__a : int = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['__java__', '__python__', '__en_XX__', '<mask>'] )
__a : Optional[Any] = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : int = PLBartTokenizer(SCREAMING_SNAKE_CASE__ , language_codes='multi' , keep_accents=SCREAMING_SNAKE_CASE__ )
__a : str = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__a : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a : str = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__a : int = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
__a : List[Any] = tokenizer.vocab_size
__a : Dict = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE__ )]
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
__a : int = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = '''uclanlp/plbart-python-en_XX'''
__SCREAMING_SNAKE_CASE : Any = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
__SCREAMING_SNAKE_CASE : Dict = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
'''simple docstring'''
__a : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
__a : Any = 1
return cls
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0_0_0_3 )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
__a : Optional[int] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
__a : str = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
__a : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Any = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 2_0]
self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = 1_0
__a : Any = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0_0_0_4, 5_0_0_0_1] )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Dict = tempfile.mkdtemp()
__a : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__a : Tuple = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
__a : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
__a : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Dict = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors='pt' )
__a : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=1_0 , return_tensors='pt' )
__a : int = targets['input_ids']
__a : Dict = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# A, test, EOS, en_XX
'input_ids': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0_0_0_1,
} , )
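# The batches above rely on `shift_tokens_right` to turn labels into decoder
# inputs. For mBART/PLBart-style targets that end in `eos lang_code`, the shift
# rotates the final language-code token to the front; a minimal sketch on a
# plain, unpadded list (the real function also handles padding):
def shift_right(labels: list) -> list:
    return labels[-1:] + labels[:-1]

assert shift_right([150, 242, 2, 50003]) == [50003, 150, 242, 2]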
| 47 |
from collections.abc import Sequence
from queue import Queue
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ):
'''simple docstring'''
__a : Tuple = start
__a : Dict = end
__a : List[str] = val
__a : List[Any] = (start + end) // 2
__a : Optional[Any] = left
__a : List[str] = right
def __repr__( self : Dict ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Tuple = collection
__a : Dict = function
if self.collection:
__a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] )
__a : Tuple = (start + end) // 2
__a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
__a : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : int = self.fn(node.left.val , node.right.val )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.root is not None:
__a : Tuple = Queue()
queue.put(self.root )
while not queue.empty():
__a : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
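# Sanity sketch for `query_range` above: a fold-style segment-tree query must
# agree with a brute-force fold over the same slice (checked on the initial
# list, before the update to index 1):
import operator

data = [2, 1, 5, 3, 4]

def brute_fold(fn, i, j):
    acc = data[i]
    for k in range(i + 1, j + 1):
        acc = fn(acc, data[k])
    return acc

assert brute_fold(operator.add, 1, 3) == 9   # 1 + 5 + 3
assert brute_fold(max, 1, 3) == 5
assert brute_fold(min, 2, 4) == 3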
| 47 | 1 |
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
    if lowerCamelCase_ == "":
        return []
    __a : list = []
    for temp in range(int(lowerCamelCase_ ) ):
        __a.append(f'''1/{temp + 1}''' if __a else '1' )
    return __a
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(UpperCAmelCase__(SCREAMING_SNAKE_CASE__))
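# Usage check for the fixed generator above (runs at import time):
assert UpperCAmelCase__('3') == ['1', '1/2', '1/3']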
| 47 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCamelCase( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ):
import pyspark
def generate_fn():
__a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
__a : Optional[Any] = partition_df.collect()
__a : Union[str, Any] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _UpperCamelCase( _BaseExamplesIterable ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ):
'''simple docstring'''
__a : List[str] = df
__a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
__a : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ):
'''simple docstring'''
__a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.partition_order )
class _UpperCamelCase( datasets.DatasetBuilder ):
__SCREAMING_SNAKE_CASE : List[str] = SparkConfig
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
import pyspark
__a : int = pyspark.sql.SparkSession.builder.getOrCreate()
__a : Optional[int] = df
__a : List[Any] = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE__ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__a : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size : int ):
        '''simple docstring'''
        import pyspark

        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
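        # Sizing note (added): with ~approx_bytes_per_row bytes per row over df_num_rows
        # rows, min(df_num_rows, floor(total / max_shard_size)) partitions keeps every
        # written shard at or below max_shard_size while leaving at least one row each.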
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ):
        '''simple docstring'''
        import pyspark

        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )

        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
            .groupBy('task_id' )
            .agg(
                pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id : int , shard_id : int , global_shard_id : int , ):
                rename(
                    fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , '' ) , )
    def _get_examples_iterable_for_split( self , split_generator : "datasets.SplitGenerator" , ):
        '''simple docstring'''
        return SparkExamplesIterable(self.df )
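# Illustrative usage sketch (added; not part of the builder above). Assuming the
# public entry point `datasets.Dataset.from_spark`, which routes a Spark DataFrame
# through this builder:
#
#     import datasets
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = datasets.Dataset.from_spark(df, cache_dir="/mnt/nfs/datasets_cache")  # path is hypothetical
#
# On a multi-node cluster the cache_dir must live on storage (e.g. NFS) reachable
# by the driver and every worker, which is exactly what _validate_cache_dir probes for.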
| 47 | 1 |
import os
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals : str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num : int ) -> str:
    numerals = ''
    m_count = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
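# Quick illustration (added): round-tripping a verbose numeral yields the minimal
# form, which is exactly the per-line saving that solution() accumulates below:
#
#     parse_roman_numerals("XIIIIII")   # -> 16
#     generate_roman_numerals(16)       # -> "XVI", saving 4 characters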
def solution(roman_numerals_filename : str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 47 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
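# Quick sanity sketch (added comment, not in the original script): for a uniform
# row p_i = 1/n the value above reduces to log(n); e.g.
#     entropy(torch.full((1, 4), 0.25))  # -> tensor([1.3863]) == log(4)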
def print_2d_tensor(tensor ):
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If the attention heads were actually pruned, set head_mask to None to avoid a shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_2d_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_2d_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_2d_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
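# Background note (added): this importance score follows Michel et al. (2019),
# "Are Sixteen Heads Really Better than One?": each head's importance is the
# accumulated |d(loss)/d(mask_h)| for a head mask initialized to ones, optionally
# normalized per layer and globally as above.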
def mask_heads(args , model , eval_dataloader ):
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
    logger.info('Final head mask' )
    print_2d_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads(args , model , eval_dataloader , head_mask ):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_0_0 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
    save_model(model , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try masking heads until a threshold of accuracy is reached.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount of heads to mask at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_2_8 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=4_2 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 47 | 1 |
from datetime import datetime
import requests
def download_video(url : str ) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 47 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , 'models' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
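    # Worked example (added note): the first mapping below fetches the Gluon tensor
    # stored under 'word_embed.0.weight' and copies it onto the HF word-embedding
    # weight, after check_and_map_params has asserted that the two shapes agree.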
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase ):
    def test_get_aligned_output_features_output_indices( self ):
        '''simple docstring'''
        stage_names = ['a', 'b', 'c']
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ['c'] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'] , None , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [-3, -1] )
    def test_verify_out_features_out_indices( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ['a', 'b'] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ['a'] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
    def test_backbone_mixin( self ):
        '''simple docstring'''
        backbone = BackboneMixin()
        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
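    # Usage sketch (added): the helper exercised above aligns features and indices
    # against stage names, defaulting to the last stage when both are None:
    #
    #     feats, idxs = get_aligned_output_features_output_indices(None, None, ['a', 'b', 'c'])
    #     # feats == ['c'], idxs == [2]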
| 47 |
def apply_table(inp , table ):
    res = ''
for i in table:
res += inp[i - 1]
return res
def left_shift(data ):
return data[1:] + data[0]
def xor(a , b ):
    res = ''
    for i in range(len(a ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
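# Tiny example (added): xor("1010", "0110") -> "1100".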
def apply_sbox(s , data ):
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
return temp + right
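# Structure note (added): S-DES encrypts as IP -> f(K1) -> swap halves -> f(K2) -> IP^-1,
# and decrypts by running the same pipeline with the two subkeys in reverse order,
# which is exactly what the __main__ block below does.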
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
| 47 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Optional[int] = {'unk_token': '<unk>'}
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__a : int = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__a : int = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_torch
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Dict = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE__ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
@require_torch
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : int = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Optional[int] = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=3_2 , padding='max_length' , return_tensors='pt' )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : List[Any] = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Dict = ['A long paragraph for summarization.']
__a : List[Any] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__a : Union[str, Any] = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__a : Tuple = inputs['input_ids']
__a : List[str] = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__a : Tuple = ['Summary of the text.', 'Another summary.']
__a : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__a : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
__a : Dict = [[0] * len(SCREAMING_SNAKE_CASE__ ) for x in encoded_output['input_ids']]
__a : Optional[int] = tokenizer.pad(SCREAMING_SNAKE_CASE__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : str = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = 'A, <mask> AllenNLP sentence.'
__a : Dict = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
__a : Dict = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__a : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__a : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
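    # Note (added): as the padding test above shows, LEDTokenizer.pad extends a
    # user-supplied `global_attention_mask` with -1 at padded positions, e.g. a
    # 5-token entry padded to length 7 becomes [0, 0, 0, 0, 0, -1, -1].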
| 47 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_gradient_accumulator( self ):
        '''simple docstring'''
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
    def test_gradient_accumulator_distribution_strategy( self ):
        '''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5e-5 , 1_0 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
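    # Minimal usage sketch (added; names other than GradientAccumulator are
    # hypothetical). Outside tests the accumulator is used the same way as above:
    #
    #     accumulator = GradientAccumulator()
    #     for micro_batch_grads in grads_per_step:  # hypothetical list of per-step gradients
    #         accumulator(micro_batch_grads)
    #     optimizer.apply_gradients(list(zip(accumulator.gradients, model.trainable_variables)))
    #     accumulator.reset()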
| 47 | 1 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase ):
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
__a : List[str] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__a : int = Vector()
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , '(0,0,0,0,0,1)' )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Optional[int] = Vector([1, 2, 3, 4] )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Union[str, Any] = Vector([1, 2] )
__a : Dict = Vector([1, 2, 3, 4, 5] )
__a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__a : int = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : List[Any] = Vector([1, 2, 3] )
__a : Optional[Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : List[Any] = Vector([1, 2, 3] )
__a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Optional[Any] = Vector([1, 2, 3] )
__a : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
__a : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Dict = Vector([1, 2, 3] )
__a : str = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) , '(3,4,7)' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : str = Vector([1, 0, 0, 0, 0, 0] )
__a : Optional[Any] = x.copy()
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : int = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , '(0,1,0)' )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : str = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
__a : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Union[str, Any] = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__a : Tuple = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__a : Any = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = vocab_size
__a : Tuple = hidden_size
__a : List[str] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : str = hidden_act
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[Any] = max_position_embeddings
__a : Dict = type_vocab_size
__a : str = initializer_range
__a : List[str] = layer_norm_eps
__a : Optional[int] = position_embedding_type
__a : Union[str, Any] = use_cache
__a : str = classifier_dropout
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
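# A minimal export sketch (hypothetical variable names; assumes the
# `transformers.onnx.export` helper, which consumes the OnnxConfig above):
#
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = RobertaOnnxConfig(model.config)  # the subclass defined above
#   export(tokenizer, model, onnx_config, opset=11, output=Path("roberta.onnx"))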
from ..utils import DummyObject, requires_backends
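# Import-time placeholders: each dummy class below mirrors a Flax-backed class
# and raises a clear "flax backend required" error through `requires_backends`
# when instantiated or loaded, instead of failing at import time.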
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['''flax''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['''flax''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''flax''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''flax''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''flax''']
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''flax''']
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''flax''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''flax''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['''flax''']
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''flax''']
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''flax''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''flax''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['''flax''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['flax'] )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
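        # XGLM checkpoints reserve 7 "<madeupword{i}>" slots after the spm
        # vocab; registering them as special tokens keeps the ids aligned.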
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
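        # Concretely, with fairseq_offset = 1: spm id 3 (",") becomes fairseq
        # id 4, while ids 0-3 stay reserved for <s>/<pad>/</s>/<unk> below.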
__a : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
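        # Layout produced above: single sequence -> `</s> A`; a pair of
        # sequences -> `</s> A </s> </s> B`.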
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
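        # PieceToId returns 0 (spm's unk) for unknown pieces; offsetting 0
        # would collide with fairseq's <pad>, hence the unk fallback above.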
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5), counting 1 as the first.

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]
    # Three-pointer merge: each index tracks the next ugly number that has
    # not yet been multiplied by its factor (2, 3 or 5 respectively).
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
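# (old prefix, new prefix) substitutions applied in order to every key of the
# original state dict by the conversion helper below.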
SCREAMING_SNAKE_CASE__ = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ):
__a : str = torch.load(lowerCamelCase_ , map_location='cpu' )
return sd
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=rename_keys_prefix ):
__a : Optional[Any] = OrderedDict()
__a : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__a : List[Any] = key
for name_pair in rename_keys_prefix:
__a : List[str] = new_key.replace(name_pair[0] , name_pair[1] )
__a : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
__a : int = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__a : Dict = 'pretraining'
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
__a : int = {'visual_embedding_dim': 2_0_4_8}
elif "vqa" in checkpoint_path:
__a : Tuple = {'visual_embedding_dim': 2_0_4_8}
elif "nlvr" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 1_0_2_4}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
__a : Any = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
__a : Any = {'visual_embedding_dim': 2_0_4_8}
__a : List[str] = 'vqa_advanced'
elif "vqa" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
__a : List[Any] = 'vqa'
elif "nlvr" in checkpoint_path:
__a : Optional[int] = {
'visual_embedding_dim': 1_0_2_4,
'num_labels': 2,
}
__a : Optional[Any] = 'nlvr'
__a : str = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
__a : str = load_state_dict(lowerCamelCase_ )
__a : str = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
__a : Optional[Any] = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
__a : Any = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
__a : int = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
__a : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    # Count reversible numbers of the given length: n + reverse(n) must have
    # only odd digits. Digit pairs are fixed outside-in, propagating the
    # carry (``remainder``) of each pair sum inward.
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        # Odd length: the middle digit is added to itself, so only the
        # incoming carry's parity matters (checked above).
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2, (remainder + digita + digitb) // 10, digits, length, )
    return result
def solution(max_power: int = 9) -> int:
    # Sum reversible-number counts over all lengths up to ``max_power`` digits.
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"{solution() = }")
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
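# A one-line quine: `%r` substitutes the repr of the format string back into
# itself, so running this line prints the program's own source verbatim.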
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
__a : Union[str, Any] = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
__a : str = model(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
__a : List[Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice.
__a : str = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = tempfile.mkdtemp()
__a : int = 8
# DPR tok
__a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : List[str] = {'unk_token': '<unk>'}
__a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' )
__a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__a : List[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__a : Union[str, Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
SCREAMING_SNAKE_CASE__ = None
def UpperCAmelCase__ ( ):
__a : Optional[Any] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=lowerCamelCase_ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=lowerCamelCase_ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
__a : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__a : int = bool(qa['answers']['text'] )
return qid_to_has_ans
def UpperCAmelCase__ ( lowerCamelCase_ : Dict ):
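    # SQuAD answer normalization: lowercase, strip punctuation and the
    # articles a/an/the, and collapse whitespace before any string comparison.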
def remove_articles(lowerCamelCase_ : List[Any] ):
return ARTICLES_REGEX.sub(' ' , lowerCamelCase_ )
def white_space_fix(lowerCamelCase_ : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase_ : Optional[Any] ):
__a : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase_ : Any ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] ):
if not s:
return []
return normalize_answer(lowerCamelCase_ ).split()
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ):
return int(normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any ):
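    # Bag-of-tokens F1: the Counter intersection keeps the multiset of shared
    # tokens; precision is measured against the prediction, recall against
    # the gold answer.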
__a : int = get_tokens(lowerCamelCase_ )
__a : Optional[int] = get_tokens(lowerCamelCase_ )
__a : Any = collections.Counter(lowerCamelCase_ ) & collections.Counter(lowerCamelCase_ )
__a : List[str] = sum(common.values() )
if len(lowerCamelCase_ ) == 0 or len(lowerCamelCase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__a : List[str] = 1.0 * num_same / len(lowerCamelCase_ )
__a : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ )
__a : int = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] ):
__a : List[str] = {}
__a : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__a : Optional[int] = qa['id']
__a : Union[str, Any] = [t for t in qa['answers']['text'] if normalize_answer(lowerCamelCase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__a : List[Any] = ['']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__a : List[str] = preds[qid]
# Take max over all gold answers
__a : List[str] = max(compute_exact(lowerCamelCase_ , lowerCamelCase_ ) for a in gold_answers )
__a : Optional[Any] = max(compute_fa(lowerCamelCase_ , lowerCamelCase_ ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ):
__a : List[Any] = {}
for qid, s in scores.items():
__a : Any = na_probs[qid] > na_prob_thresh
if pred_na:
__a : Any = float(not qid_to_has_ans[qid] )
else:
__a : List[str] = s
return new_scores
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=None ):
if not qid_list:
__a : Optional[Any] = len(lowerCamelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
__a : Dict = len(lowerCamelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ):
for k in new_eval:
__a : Tuple = new_eval[k]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
plt.step(lowerCamelCase_ , lowerCamelCase_ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(lowerCamelCase_ , lowerCamelCase_ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCamelCase_ )
plt.savefig(lowerCamelCase_ )
plt.clf()
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[Any]=None ):
__a : Tuple = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : na_probs[k] )
__a : Any = 0.0
__a : List[str] = 1.0
__a : Any = 0.0
__a : Optional[int] = [1.0]
__a : str = [0.0]
__a : Union[str, Any] = 0.0
for i, qid in enumerate(lowerCamelCase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__a : Union[str, Any] = true_pos / float(i + 1 )
__a : Tuple = true_pos / float(lowerCamelCase_ )
if i == len(lowerCamelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCamelCase_ )
recalls.append(lowerCamelCase_ )
if out_image:
plot_pr_curve(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return {"ap": 100.0 * avg_prec}
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
if out_image_dir and not os.path.exists(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
__a : List[str] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__a : Any = make_precision_recall_eval(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , out_image=os.path.join(lowerCamelCase_ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
__a : Tuple = make_precision_recall_eval(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , out_image=os.path.join(lowerCamelCase_ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
__a : str = {k: float(lowerCamelCase_ ) for k, v in qid_to_has_ans.items()}
__a : List[str] = make_precision_recall_eval(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , out_image=os.path.join(lowerCamelCase_ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(lowerCamelCase_ , lowerCamelCase_ , 'pr_exact' )
merge_eval(lowerCamelCase_ , lowerCamelCase_ , 'pr_f1' )
merge_eval(lowerCamelCase_ , lowerCamelCase_ , 'pr_oracle' )
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ):
if not qid_list:
return
__a : List[Any] = [na_probs[k] for k in qid_list]
__a : Tuple = np.ones_like(lowerCamelCase_ ) / float(len(lowerCamelCase_ ) )
plt.hist(lowerCamelCase_ , weights=lowerCamelCase_ , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(lowerCamelCase_ , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
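    # Threshold sweep: start from the score of predicting "" everywhere, then
    # walk qids in order of ascending no-answer probability, flipping one
    # prediction to "answered" at a time and tracking the best total score.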
__a : List[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__a : Dict = num_no_ans
__a : List[Any] = cur_score
__a : List[Any] = 0.0
__a : Dict = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : na_probs[k] )
for i, qid in enumerate(lowerCamelCase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__a : Optional[Any] = scores[qid]
else:
if preds[qid]:
__a : Any = -1
else:
__a : Any = 0
cur_score += diff
if cur_score > best_score:
__a : List[str] = cur_score
__a : List[Any] = na_probs[qid]
return 100.0 * best_score / len(lowerCamelCase_ ), best_thresh
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] ):
__a , __a : Optional[int] = find_best_thresh(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__a , __a : int = find_best_thresh(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__a : str = best_exact
__a : Union[str, Any] = exact_thresh
__a : Dict = best_fa
__a : List[Any] = fa_thresh
def UpperCAmelCase__ ( ):
with open(OPTS.data_file ) as f:
__a : Dict = json.load(lowerCamelCase_ )
__a : Union[str, Any] = dataset_json['data']
with open(OPTS.pred_file ) as f:
__a : Optional[int] = json.load(lowerCamelCase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__a : List[Any] = json.load(lowerCamelCase_ )
else:
__a : Optional[int] = {k: 0.0 for k in preds}
__a : List[Any] = make_qid_to_has_ans(lowerCamelCase_ ) # maps qid to True/False
__a : int = [k for k, v in qid_to_has_ans.items() if v]
__a : int = [k for k, v in qid_to_has_ans.items() if not v]
__a , __a : int = get_raw_scores(lowerCamelCase_ , lowerCamelCase_ )
__a : List[str] = apply_no_ans_threshold(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , OPTS.na_prob_thresh )
__a : int = apply_no_ans_threshold(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , OPTS.na_prob_thresh )
__a : str = make_eval_dict(lowerCamelCase_ , lowerCamelCase_ )
if has_ans_qids:
__a : int = make_eval_dict(lowerCamelCase_ , lowerCamelCase_ , qid_list=lowerCamelCase_ )
merge_eval(lowerCamelCase_ , lowerCamelCase_ , 'HasAns' )
if no_ans_qids:
__a : List[Any] = make_eval_dict(lowerCamelCase_ , lowerCamelCase_ , qid_list=lowerCamelCase_ )
merge_eval(lowerCamelCase_ , lowerCamelCase_ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , OPTS.out_image_dir )
histogram_na_prob(lowerCamelCase_ , lowerCamelCase_ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(lowerCamelCase_ , lowerCamelCase_ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
else:
print(json.dumps(lowerCamelCase_ , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : int = vocab_file
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Optional[Any] = []
__a : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
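# fairseq -> transformers parameter-name map; "*" stands for the encoder
# layer index and is substituted per-layer during weight loading below.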
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
for attribute in key.split('.' ):
__a : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
__a : int = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
__a : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__a : Optional[int] = value
elif weight_type == "weight_g":
__a : str = value
elif weight_type == "weight_v":
__a : Optional[Any] = value
elif weight_type == "bias":
__a : str = value
else:
__a : int = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ):
__a : str = []
__a : int = fairseq_model.state_dict()
__a : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a : Tuple = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
__a : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
__a : List[str] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
__a : List[str] = True
if "*" in mapped_key:
__a : Optional[Any] = name.split(lowerCamelCase_ )[0].split('.' )[-2]
__a : Optional[int] = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
__a : Optional[int] = 'weight_g'
elif "weight_v" in name:
__a : List[str] = 'weight_v'
elif "weight" in name:
__a : str = 'weight'
elif "bias" in name:
__a : Dict = 'bias'
else:
__a : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] ):
__a : Union[str, Any] = full_name.split('conv_layers.' )[-1]
__a : Union[str, Any] = name.split('.' )
__a : List[Any] = int(items[0] )
__a : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__a : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__a : Tuple = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__a : str = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__a : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
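# A minimal standalone sketch of the shape-checked weight copy used in
# load_conv_layer above. `copy_param` is an illustrative helper, not part of
# the original script; the toy Conv1d stands in for a feature-extractor layer.
def copy_param(dst, src, full_name):
    # Shapes must match exactly; assigning to .data bypasses autograd history.
    assert dst.data.shape == src.shape, f"{full_name}: {src.shape} vs {dst.data.shape}"
    dst.data = src
if __name__ == "__main__":
    _conv = torch.nn.Conv1d(1, 4, kernel_size=3)
    copy_param(_conv.bias, torch.zeros(4), "toy.conv.bias")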
| 47 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 47 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 |
import math
from datetime import datetime, timedelta
def gauss_easter(year : int ):
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
def solution(numerator : int = 1 , digit : int = 1_0_0_0 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 1_0 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
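# An alternative sketch of the same search, assuming the goal is the longest
# recurring decimal cycle of 1/d for d up to the given digit (Project Euler
# 26 style): the cycle length is the multiplicative order of 10 modulo the
# part of d that is coprime to 10.
def cycle_length(d: int) -> int:
    while d % 2 == 0:
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0  # the decimal expansion terminates
    k, r = 1, 10 % d
    while r != 1:
        r = r * 10 % d
        k += 1
    return k
if __name__ == "__main__":
    print(max(range(2, 1000), key=cycle_length))  # 983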
| 47 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
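# A short usage sketch, assuming this class is the InformerConfig exposed by
# transformers (hyper-parameter values below are illustrative only):
#
#     from transformers import InformerConfig
#     config = InformerConfig(prediction_length=24, context_length=48)
#     print(config.d_model)             # 64 by default
#     print(len(config.lags_sequence))  # the 7 default lags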
| 47 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
class SegmentTree(Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ):  # noqa: E741
        l , r = l + self.N , r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2 , (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 47 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
        config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : List[str] = 1_0, 0.0
__a : Dict = self.dummy_model()
__a : str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
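# A minimal usage sketch of the scheduler under test (a toy denoising step;
# the random tensors stand in for a model output and a sample):
if __name__ == "__main__":
    scheduler = DDIMParallelScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    model_output = torch.randn(1, 3, 8, 8)
    prev = scheduler.step(model_output, scheduler.timesteps[0], sample).prev_sample
    print(prev.shape)  # torch.Size([1, 3, 8, 8])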
| 47 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 47 |
def cramers_rule_2x2(equation1 : list[int] , equation2 : list[int] ):
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
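# A quick cross-check of the solver above, using NumPy as an optional
# dependency (equations are given as [a, b, c] for a*x + b*y = c):
if __name__ == "__main__":
    import numpy as np
    coeffs = np.array([[2.0, 3.0], [1.0, -1.0]])  # 2x + 3y = 8, x - y = 1
    rhs = np.array([8.0, 1.0])
    print(np.linalg.solve(coeffs, rhs))  # [2.2 1.2]
    print(cramers_rule_2x2([2, 3, 8], [1, -1, 1]))  # (2.2, 1.2)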
| 47 | 1 |
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
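    # The same value for two plain sets reduces to a set-operator one-liner,
    # shown here as a cross-check (3 shared elements over 8 distinct: 0.375):
    print(len(set_a & set_b) / len(set_a | set_b))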
| 47 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num : float ):
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand(x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
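# A sanity check against the closed forms the standard library provides
# (math.gamma evaluates the same integral):
if __name__ == "__main__":
    print(math.gamma(5))    # 24.0 == 4!
    print(math.gamma(0.5))  # 1.7724538509055159 == sqrt(pi)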
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
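# A minimal standalone sketch of the same lazy-import idea using PEP 562
# module-level __getattr__ instead of transformers' _LazyModule (the module
# and attribute names below are toy examples):
#
#     import importlib
#     _imports = {"math": ["sqrt", "pi"]}
#     def __getattr__(name):
#         for mod, attrs in _imports.items():
#             if name in attrs:
#                 return getattr(importlib.import_module(mod), name)
#         raise AttributeError(name)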
| 47 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg : str , hint : str = None ):
    require_version(deps[pkg] , hint )
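# A standalone sketch of the same runtime check using only the standard
# library (importlib.metadata, Python 3.8+); check_installed is illustrative
# and not part of transformers:
def check_installed(pkg: str) -> str:
    from importlib.metadata import PackageNotFoundError, version
    try:
        return version(pkg)
    except PackageNotFoundError as err:
        raise ImportError(f"{pkg} is required but not installed") from err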
| 47 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence : str ):
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : Tuple=PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
'''simple docstring'''
__a : Union[str, Any] = do_resize
__a : Optional[Any] = do_rescale
__a : Union[str, Any] = size_divisor
__a : str = resample
super().__init__(**SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a , __a : List[str] = get_image_size(SCREAMING_SNAKE_CASE__ )
# Rounds the height and width down to the closest multiple of size_divisor
__a : List[str] = height // size_divisor * size_divisor
__a : List[Any] = width // size_divisor * size_divisor
__a : Optional[Any] = resize(SCREAMING_SNAKE_CASE__ , (new_h, new_w) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return image
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[TensorType, str]] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
__a : Optional[int] = do_resize if do_resize is not None else self.do_resize
__a : str = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor
__a : Optional[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__a : Union[str, Any] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__a : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for img in images]
if do_resize:
__a : Union[str, Any] = [self.resize(SCREAMING_SNAKE_CASE__ , size_divisor=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__a : Tuple = [self.rescale(SCREAMING_SNAKE_CASE__ , scale=1 / 2_5_5 ) for image in images]
__a : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Dict = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
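# A standalone sketch of the divisor rounding performed in the resize method
# above: height and width are floored to the nearest multiple of size_divisor
# before resizing (toy numbers only).
if __name__ == "__main__":
    height, width, size_divisor = 450, 610, 32
    print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # 448 608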
| 47 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = hidden_size
__a : Optional[Any] = feat_extract_norm
__a : List[str] = feat_extract_activation
__a : Dict = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
__a : List[str] = list(SCREAMING_SNAKE_CASE__ )
__a : int = conv_bias
__a : Tuple = num_conv_pos_embeddings
__a : List[str] = num_conv_pos_embedding_groups
__a : Optional[Any] = len(self.conv_dim )
__a : Union[str, Any] = num_hidden_layers
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = squeeze_factor
__a : List[Any] = max_position_embeddings
__a : Tuple = position_buckets
__a : Optional[int] = share_att_key
__a : List[str] = relative_attention
__a : Any = norm_rel_ebd
__a : Any = list(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = hidden_act
__a : str = num_attention_heads
__a : Union[str, Any] = hidden_dropout
__a : Optional[int] = attention_dropout
__a : List[str] = activation_dropout
__a : int = feat_proj_dropout
__a : int = final_dropout
__a : Dict = layer_norm_eps
__a : Tuple = feature_layer_norm_eps
__a : str = initializer_range
__a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a : Tuple = apply_spec_augment
__a : Optional[Any] = mask_time_prob
__a : Any = mask_time_length
__a : List[str] = mask_time_min_masks
__a : List[str] = mask_feature_prob
__a : Tuple = mask_feature_length
__a : Any = mask_feature_min_masks
# ctc loss
__a : Optional[int] = ctc_loss_reduction
__a : List[Any] = ctc_zero_infinity
# sequence classification
__a : Dict = use_weighted_layer_sum
__a : Optional[Any] = classifier_proj_size
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
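# A standalone sketch of the property above: the product of the conv strides
# is the model's input-to-output downsampling factor. With the default stride
# tuple this is one output frame per 320 input samples, i.e. 20 ms at 16 kHz.
if __name__ == "__main__":
    default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul, default_conv_stride, 1))  # 320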
| 47 | 1 |
def is_balanced(s : str ):
    stack = []
    open_brackets = set({'(', '[', '{'} )
    closed_brackets = set({')', ']', '}'} )
    open_to_closed = {'{': '}', '[': ']', '(': ')'}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main():
    s = input('Enter sequence of brackets: ' )
    if is_balanced(s ):
        print(s , 'is balanced' )
    else:
        print(s , 'is not balanced' )
if __name__ == "__main__":
    main()
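# A few quick self-checks for is_balanced (illustrative; they run only when
# the file is executed directly, after the interactive prompt above):
if __name__ == "__main__":
    assert is_balanced("{[()]}")
    assert not is_balanced("{[(])}")
    assert is_balanced("")  # an empty sequence counts as balanced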
| 47 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar('''T''')
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (position - 1) // 2
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 1
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
return (2 * position) + 2
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[str] ):
'''simple docstring'''
__a : list[tuple[T, int]] = []
__a : dict[T, int] = {}
__a : int = 0
def __len__( self : Any ):
'''simple docstring'''
return self.elements
def __repr__( self : Any ):
'''simple docstring'''
return str(self.heap )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.elements == 0
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
__a : List[Any] = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a : Union[str, Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a : Dict = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
__a : str = (elem, weight)
if position > 0:
__a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : Dict = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : List[Any] = self.position_map[elem]
if curr_pos == 0:
return None
__a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ )
__a , __a : str = self.heap[curr_pos]
__a , __a : Optional[int] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
__a : int = self.position_map[elem]
__a , __a : Optional[Any] = self.heap[curr_pos]
__a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a : str = self.heap[child_left_position]
__a , __a : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__a , __a : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__a , __a : Union[str, Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.heap[nodea_pos][0]
__a : str = self.heap[nodea_pos][0]
__a , __a : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a : str = nodea_pos
__a : Optional[int] = nodea_pos
class _UpperCamelCase( Generic[T] ):
def __init__( self : List[Any] ):
'''simple docstring'''
__a : dict[T, dict[T, int]] = {}
__a : int = 0
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Dict ):
'''simple docstring'''
return self.nodes
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ):
'''simple docstring'''
if node not in self.connections:
__a : Tuple = {}
self.nodes += 1
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = weight
__a : Any = weight
def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ):
__a : dict[T, int] = {node: maxsize for node in graph.connections}
__a : dict[T, T | None] = {node: None for node in graph.connections}
__a : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase_ , lowerCamelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a : Optional[int] = priority_queue.extract_min()
__a : int = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Optional[int] = node
# running prim's algorithm
while not priority_queue.is_empty():
__a : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase_ , dist[neighbour] )
__a : Dict = node
return dist, parent
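# A compact cross-check of Prim's algorithm using heapq with lazy deletion
# instead of the decrease-key queue above (toy graph; prim_total_weight is
# an illustrative helper, not part of the classes in this file):
import heapq
def prim_total_weight(adj: dict) -> int:
    start = next(iter(adj))
    seen = {start}
    heap = [(w, v) for v, w in adj[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(seen) < len(adj):
        w, v = heapq.heappop(heap)
        if v in seen:
            continue  # stale entry left behind by lazy deletion
        seen.add(v)
        total += w
        for u, wu in adj[v].items():
            if u not in seen:
                heapq.heappush(heap, (wu, u))
    return total
if __name__ == "__main__":
    toy = {"a": {"b": 3, "c": 15}, "b": {"a": 3, "c": 10}, "c": {"a": 15, "b": 10}}
    print(prim_total_weight(toy))  # 13 (edges a-b and b-c)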
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from collections.abc import Sequence
from queue import Queue
class _UpperCamelCase:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ):
'''simple docstring'''
__a : Tuple = start
__a : Dict = end
__a : List[str] = val
__a : List[Any] = (start + end) // 2
__a : Optional[Any] = left
__a : List[str] = right
def __repr__( self : Dict ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _UpperCamelCase:
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
__a : Tuple = collection
__a : Dict = function
if self.collection:
__a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] )
__a : Tuple = (start + end) // 2
__a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
__a : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : int = self.fn(node.left.val , node.right.val )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.root is not None:
__a : Tuple = Queue()
queue.put(self.root )
while not queue.empty():
__a : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 47 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
return max(metric_fn(lowerCamelCase_ , lowerCamelCase_ ) for gt in ground_truths )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ):
__a : Optional[Any] = [line.strip() for line in open(lowerCamelCase_ , 'r' ).readlines()]
__a : Any = []
if args.gold_data_mode == "qa":
__a : str = pd.read_csv(lowerCamelCase_ , sep='\t' , header=lowerCamelCase_ )
for answer_list in data[1]:
__a : List[str] = ast.literal_eval(lowerCamelCase_ )
answers.append(lowerCamelCase_ )
else:
__a : List[str] = [line.strip() for line in open(lowerCamelCase_ , 'r' ).readlines()]
__a : Optional[Any] = [[reference] for reference in references]
__a : Tuple = 0
for prediction, ground_truths in zip(lowerCamelCase_ , lowerCamelCase_ ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
fa += metric_max_over_ground_truths(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__a : List[Any] = 100.0 * em / total
__a : int = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
__a : str = args.k
__a : Optional[Any] = [line.strip() for line in open(lowerCamelCase_ , 'r' ).readlines()]
__a : Tuple = [line.strip() for line in open(lowerCamelCase_ , 'r' ).readlines()]
__a : Optional[Any] = 0
for hypo, reference in zip(lowerCamelCase_ , lowerCamelCase_ ):
__a : Dict = set(hypo.split('\t' )[:k] )
__a : List[str] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__a : List[Any] = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
def strip_title(lowerCamelCase_ : Dict ):
if title.startswith('"' ):
__a : List[str] = title[1:]
if title.endswith('"' ):
__a : str = title[:-1]
return title
__a : List[str] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase_ , return_tensors='pt' , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , )['input_ids'].to(args.device )
__a : Union[str, Any] = rag_model.rag.question_encoder(lowerCamelCase_ )
__a : List[Any] = question_enc_outputs[0]
__a : str = rag_model.retriever(
lowerCamelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
__a : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__a : List[Any] = []
for docs in all_docs:
__a : Tuple = [strip_title(lowerCamelCase_ ) for title in docs['title']]
provenance_strings.append('\t'.join(lowerCamelCase_ ) )
return provenance_strings
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ):
with torch.no_grad():
__a : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase_ , return_tensors='pt' , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
__a : int = inputs_dict.input_ids.to(args.device )
__a : Dict = inputs_dict.attention_mask.to(args.device )
__a : List[str] = rag_model.generate( # rag_model overwrites generate
lowerCamelCase_ , attention_mask=lowerCamelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__a : Optional[int] = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
if args.print_predictions:
for q, a in zip(lowerCamelCase_ , lowerCamelCase_ ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase_ , lowerCamelCase_ ) )
return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=str , help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ) , )
    parser.add_argument(
        '--index_name' , default=None , choices=['exact', 'compressed', 'legacy'] , type=str , help='RAG model retriever type' , )
    parser.add_argument(
        '--index_path' , default=None , type=str , help='Path to the retrieval index' , )
    parser.add_argument('--n_docs' , default=5 , type=int , help='Number of retrieved docs' )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=str , help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ) , )
    parser.add_argument('--k' , default=1 , type=int , help='k for the precision@k calculation' )
    parser.add_argument(
        '--evaluation_set' , default=None , type=str , required=True , help='Path to a file containing evaluation samples' , )
    parser.add_argument(
        '--gold_data_path' , default=None , type=str , required=True , help='Path to a tab-separated file with gold samples' , )
    parser.add_argument(
        '--gold_data_mode' , default='qa' , type=str , choices=['qa', 'ans'] , help=(
            'Format of the gold data file. '
            'qa - a single line in the following format: question [tab] answer_list. '
            'ans - a single line of the gold file contains the expected answer string'
        ) , )
    parser.add_argument(
        '--predictions_path' , type=str , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
    parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
    parser.add_argument(
        '--eval_batch_size' , default=8 , type=int , help='Batch size per GPU/CPU for evaluation.' , )
    parser.add_argument(
        '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
    parser.add_argument(
        '--num_beams' , default=4 , type=int , help='Number of beams to be used when generating answers' , )
    parser.add_argument('--min_length' , default=1 , type=int , help='Min length of the generated answers' )
    parser.add_argument('--max_length' , default=5_0 , type=int , help='Max length of the generated answers' )
    parser.add_argument(
        '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
    parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    return args
def main(args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('rag' ):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s' , checkpoints )
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint ) )
        logger.info('  Batch size = %d' , args.eval_batch_size )
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path ) )
        if args.model_type.startswith('rag' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('\n'.join(answers ) + '\n' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('\n'.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
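# Example invocation (hypothetical file names; the flags come from get_args above,
# and facebook/rag-sequence-nq is one of the published RAG checkpoints):
# python eval_rag.py --model_name_or_path facebook/rag-sequence-nq --model_type rag_sequence \
#     --evaluation_set nq_dev.source --gold_data_path nq_dev.gold \
#     --predictions_path preds.txt --eval_mode e2e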
| 47 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig ):
    features : Optional[datasets.Features] = None
def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ):
import pyspark
def generate_fn():
__a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
__a : Optional[Any] = partition_df.collect()
__a : Union[str, Any] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable ):
    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order=None , ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator : np.random.Generator ):
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id : int , num_workers : int ):
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ):
        '''simple docstring'''
        return len(self.partition_order )
class Spark(datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **config_kwargs , ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
        '''simple docstring'''
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , 'a' )
            return [probe_file]
        if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        '''simple docstring'''
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ):
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(os.path.join(os.path.dirname(working_fpath ) , file ) , dest )
        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
            .groupBy('task_id' )
            .agg(
                pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id : int , shard_id : int , global_shard_id : int , ):
                rename(
                    fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , '' ) , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
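# A minimal sketch of the TTTTT-SSSSS-of-NNNNN shard naming used in _prepare_split
# above (hypothetical values, not part of the builder itself):
fname_example = 'my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow'
task_id_example, shard_id_example, global_shard_id_example, total_shards_example = 3, 1, 7, 12
per_task_name = fname_example.replace('SSSSS' , f'''{shard_id_example:05d}''' ).replace('TTTTT' , f'''{task_id_example:05d}''' )
final_name = fname_example.replace('TTTTT-SSSSS' , f'''{global_shard_id_example:05d}''' ).replace('NNNNN' , f'''{total_shards_example:05d}''' )
assert per_task_name == 'my_dataset-train-00003-00001-of-NNNNN.arrow'
assert final_name == 'my_dataset-train-00007-of-00012.arrow'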
| 47 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ):
        '''simple docstring'''
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
    def construct_mapping( self , node , deep=False ):
        '''simple docstring'''
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme(readme_content : str ):
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
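# A minimal illustration (hypothetical README content) of the split above: for a
# file whose lines are ['---', 'license: mit', '---', '# My dataset'], sep_idx is 2,
# so the function returns ('license: mit', '# My dataset').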
class DatasetMetadata(dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path : Path ):
        '''simple docstring'''
        with open(path , encoding='utf-8' ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path : Path ):
        '''simple docstring'''
        if path.exists():
            with open(path , encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8' ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self , readme_content : Optional[str] = None ):
        '''simple docstring'''
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string( cls , string : str ):
        '''simple docstring'''
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ):
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8' )
SCREAMING_SNAKE_CASE__ = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 47 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
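# A quick sanity check (hypothetical values) for entropy above: a uniform
# attention row torch.tensor([0.25, 0.25, 0.25, 0.25]) yields
# -(4 * 0.25 * log(0.25)) = log(4) ≈ 1.386, the maximum entropy over four tokens.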
def print_2d_tensor(tensor ):
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids ,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_2d_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_2d_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_2d_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads(args , model , eval_dataloader ):
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
    logger.info('Final head mask' )
    print_2d_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads(args , model , eval_dataloader , head_mask ):
    # Try pruning and test time speedup; pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_0_0 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
    save_model(model , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount of heads to mask at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_2_8 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=4_2 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
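# Example invocation (hypothetical file names; the flags come from the parser in main above):
# python run_bertology_gpt2.py --model_name_or_path gpt2 --data_dir token_ids.txt \
#     --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9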
| 47 | 1 |
def fibonacci(n : int ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n : int ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n : int = 1_0_0_0 ):
    return fibonacci_digits_index(n )
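# A faster variant of the search above (a sketch, not part of the original task):
# carrying the Fibonacci pair forward avoids recomputing the whole sequence for
# every candidate index, turning the quadratic scan into a linear one.
def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
# fibonacci_digits_index_fast(3) == 12, since F(12) = 144 is the first 3-digit term.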
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir() , 'models' )
    vocab = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
    print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_attention_heads=1_2 , num_qa_labels=9_5_0_0 , num_object_labels=1_6_0_0 , num_attr_labels=4_0_0 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2_0_4_8 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs )
| 47 |
def apply_table(inp , table ):
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
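# A tiny worked example of apply_table: apply_table('1010', [2, 4, 3, 1]) picks
# bits 2, 4, 3 and 1 of the input (1-indexed) and returns '0011'.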
def left_shift(data ):
    return data[1:] + data[0]
def xor(a , b ):
    res = ''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s , data ):
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
| 47 | 1 |
def abbr(a : str , b : str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
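# Usage sketch (the classic 'Abbreviation' problem): abbr('daBcd', 'ABC') is True,
# because 'a' and 'c' can be capitalized and the remaining lowercase 'd's deleted,
# while abbr('dBcd', 'ABC') is False since no 'a' is available to capitalize.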
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator( self ):
        '''simple docstring'''
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : int = None
ops.enable_eager_execution_internal()
__a : Optional[Any] = tf.config.list_physical_devices('CPU' )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a : int = tf.config.list_logical_devices(device_type='CPU' )
__a : str = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a : List[str] = GradientAccumulator()
__a : Tuple = tf.Variable([4.0, 3.0] )
__a , __a : int = create_optimizer(5e-5 , 1_0 , 5 )
__a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ )
def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
with strategy.scope():
__a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ )
local_variables[0].assign(SCREAMING_SNAKE_CASE__ )
local_variables[1].assign(SCREAMING_SNAKE_CASE__ )
strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE__ )
def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
__a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
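        # For reference, a typical (hypothetical) training-loop usage of GradientAccumulator,
        # assuming a standard tf.GradientTape setup; compute_loss and accumulation_steps are
        # user-defined:
        #
        #     accumulator = GradientAccumulator()
        #     for step, batch in enumerate(dataset):
        #         with tf.GradientTape() as tape:
        #             loss = compute_loss(model, batch)
        #         accumulator(tape.gradient(loss, model.trainable_variables))
        #         if (step + 1) % accumulation_steps == 0:
        #             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        #             accumulator.reset()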
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = vocab_size
__a : Tuple = hidden_size
__a : List[str] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : str = hidden_act
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[Any] = max_position_embeddings
__a : Dict = type_vocab_size
__a : str = initializer_range
__a : List[str] = layer_norm_eps
__a : Optional[int] = position_embedding_type
__a : Union[str, Any] = use_cache
__a : str = classifier_dropout
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
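# A minimal export sketch using this ONNX config (assuming the `transformers.onnx`
# export CLI of the same era; the output directory name is arbitrary):
#
#     python -m transformers.onnx --model=roberta-base onnx/
#
# The dynamic axes returned above let the exported graph accept a variable batch size
# and sequence length (and a variable number of choices for multiple-choice tasks).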
| 47 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCamelCase( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ):
import pyspark
def generate_fn():
__a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
__a : Optional[Any] = partition_df.collect()
__a : Union[str, Any] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _UpperCamelCase( _BaseExamplesIterable ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ):
'''simple docstring'''
__a : List[str] = df
__a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
__a : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ):
'''simple docstring'''
__a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.partition_order )
class _UpperCamelCase( datasets.DatasetBuilder ):
__SCREAMING_SNAKE_CASE : List[str] = SparkConfig
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
import pyspark
__a : int = pyspark.sql.SparkSession.builder.getOrCreate()
__a : Optional[int] = df
__a : List[Any] = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE__ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__a : List[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__a : List[str] = self.df.count()
__a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__a : List[str] = (
self.df.limit(SCREAMING_SNAKE_CASE__ )
.repartition(1 )
.mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__a : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) )
__a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ )
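        # Worked example (illustrative numbers): with df_num_rows = 1_000_000 and a sampled
        # average of ~500 bytes per Arrow row, approx_total_size is about 500 MB. With a
        # max_shard_size of 250 MB the DataFrame is repartitioned into
        # min(1_000_000, int(500e6 / 250e6)) = 2 partitions, i.e. roughly one partition
        # per max_shard_size worth of data.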
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
import pyspark
__a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter
__a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath
__a : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__a : List[str] = self.config.features
__a : int = self._writer_batch_size
__a : Union[str, Any] = self._fs.storage_options
def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__a : Any = pyspark.TaskContext().taskAttemptId()
__a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__a : Any = 0
__a : List[str] = writer_class(
features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__a , __a : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__a : Optional[Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , )
__a : Union[str, Any] = pa.Table.from_batches([batch] )
writer.write_table(SCREAMING_SNAKE_CASE__ )
if writer._num_bytes > 0:
__a , __a : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ):
__a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) )
shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = (
self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
self._validate_cache_dir()
__a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = not is_remote_filesystem(self._fs )
__a : Optional[Any] = os.path.join if is_local else posixpath.join
__a : Any = '-TTTTT-SSSSS-of-NNNNN'
__a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
__a : List[str] = total_num_examples
__a : Optional[int] = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__a : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__a : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ):
rename(
SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
__a : Union[str, Any] = []
__a : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__a , __a : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect()
else:
# don't use any pattern
__a : List[Any] = 0
__a : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/xglm-564M''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Any = 1
    # Mimic fairseq token-to-id alignment for the first 4 tokens
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
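        # Worked example of the alignment above: the spm model maps ',' to id 3, so its
        # final id is 3 + self.fairseq_offset = 4, matching the fairseq vocabulary, while
        # '<unk>' is served directly by the hand-written table above, as id 3.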
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE__ = 3
def primitive_root ( p : int ):
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p )
        if pow(g , 2 , p ) == 1:
            continue
        if pow(g , p , p ) == 1:
            continue
        return g
def generate_key ( key_size : int ):
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
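# A quick sanity sketch of the ElGamal relationship (illustrative, small numbers):
# with p = 23, g = 5 and d = 7, pow(g, d, p) = 17 and find_mod_inverse(17, 23) = 19,
# since (17 * 19) % 23 == 1; the published components alongside p are then (5, 19).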
def make_key_files ( name : str , key_size : int ):
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('\nWARNING:' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main ( ):
    print('Making key files...' )
    make_key_files('elgamal' , 2_0_4_8 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
| 47 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
SCREAMING_SNAKE_CASE__ = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ):
__a : str = torch.load(lowerCamelCase_ , map_location='cpu' )
return sd
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=rename_keys_prefix ):
__a : Optional[Any] = OrderedDict()
__a : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__a : List[Any] = key
for name_pair in rename_keys_prefix:
__a : List[str] = new_key.replace(name_pair[0] , name_pair[1] )
__a : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
__a : int = new_d['cls.predictions.bias']
return new_d
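# Example of the renaming (illustrative key): applying the ('bert.bert', 'visual_bert')
# pair above turns
#     'bert.bert.embeddings.word_embeddings.weight'
# into
#     'visual_bert.embeddings.word_embeddings.weight'
# while keys containing 'detector' are dropped entirely.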
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__a : Dict = 'pretraining'
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
__a : int = {'visual_embedding_dim': 2_0_4_8}
elif "vqa" in checkpoint_path:
__a : Tuple = {'visual_embedding_dim': 2_0_4_8}
elif "nlvr" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 1_0_2_4}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__a : int = {'visual_embedding_dim': 5_1_2}
__a : Any = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
__a : Any = {'visual_embedding_dim': 2_0_4_8}
__a : List[str] = 'vqa_advanced'
elif "vqa" in checkpoint_path:
__a : List[Any] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
__a : List[Any] = 'vqa'
elif "nlvr" in checkpoint_path:
__a : Optional[int] = {
'visual_embedding_dim': 1_0_2_4,
'num_labels': 2,
}
__a : Optional[Any] = 'nlvr'
__a : str = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
__a : str = load_state_dict(lowerCamelCase_ )
__a : str = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
__a : Optional[Any] = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
__a : Any = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
__a : int = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
__a : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
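# The line above is a quine: '%%' collapses to a literal '%' and '%r' substitutes the
# repr of the format string into itself, so the printed output reproduces the program's
# own source (character-for-character when the literal is written with the single
# quotes that %r produces).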
| 47 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple ( max_perimeter : int ):
    triplets : typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution ( max_perimeter : int = 1_0_0_0 ):
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
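# For the default max_perimeter of 1000 this returns 840, the perimeter with the most
# integer-sided right-triangle solutions (Project Euler problem 39).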
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 47 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[Any] = tempfile.mkdtemp()
__a : int = 8
# DPR tok
__a : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : List[str] = {'unk_token': '<unk>'}
__a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' )
__a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__a : List[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__a : Union[str, Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__a : str = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 47 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = '''facebook/bart-large-mnli'''
__SCREAMING_SNAKE_CASE : int = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__SCREAMING_SNAKE_CASE : Optional[Any] = '''text_classifier'''
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForSequenceClassification
__SCREAMING_SNAKE_CASE : List[str] = ['''text''', ['''text''']]
__SCREAMING_SNAKE_CASE : Optional[int] = ['''text''']
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
super().setup()
__a : Tuple = self.model.config
__a : str = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
__a : Any = int(SCREAMING_SNAKE_CASE__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : Any = labels
return self.pre_processor(
[text] * len(SCREAMING_SNAKE_CASE__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a : Dict = outputs.logits
__a : List[str] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
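        # Illustrative call (hypothetical labels): running the tool on 'This movie was great'
        # with labels=['positive', 'negative'] encodes one NLI pair per label
        # ("This example is positive", ...) and returns the label whose entailment logit
        # (column 2 for facebook/bart-large-mnli) is highest.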
| 47 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : int = vocab_file
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
__a : Union[str, Any] = self.__dict__.copy()
__a : Any = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : str = {}
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Optional[Any] = []
__a : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
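        # Illustrative decode (hypothetical pieces): for tokens ['▁Hello', '▁world', '</s>']
        # the special token '</s>' is decoded separately from the sentencepiece pieces,
        # yielding 'Hello world</s>' with the underline markers turned back into spaces.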
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
SCREAMING_SNAKE_CASE__ = float('''nan''')
class _UpperCamelCase:
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Union[str, Any] = sys.stdout
__a : Tuple = open(SCREAMING_SNAKE_CASE__ , 'a' )
def __getattr__( self : str , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
return getattr(self.stdout , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
self.stdout.write(SCREAMING_SNAKE_CASE__ )
# strip tqdm codes
self.file.write(re.sub(r'^.*\r' , '' , SCREAMING_SNAKE_CASE__ , 0 , re.M ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Dict=8_0 , lowerCamelCase_ : int=False ):
__a : Tuple = []
# deal with critical env vars
__a : int = ['CUDA_VISIBLE_DEVICES']
for key in env_keys:
__a : Any = os.environ.get(lowerCamelCase_ , lowerCamelCase_ )
if val is not None:
cmd.append(f'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__a : Union[str, Any] = sys.executable if full_python_path else sys.executable.split('/' )[-1]
cmd.append(lowerCamelCase_ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__a : List[Any] = []
__a : Optional[Any] = ''
while len(lowerCamelCase_ ) > 0:
current_line += f'''{cmd.pop(0 )} '''
if len(lowerCamelCase_ ) == 0 or len(lowerCamelCase_ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(lowerCamelCase_ )
__a : List[Any] = ''
return "\\\n".join(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
# unwrap multi-line input
__a : Dict = re.sub(R'[\\\n]+' , ' ' , args.base_cmd )
# remove --output_dir if any and set our own
    __a : Union[str, Any] = re.sub(R'--output_dir\s+[^\s]+' , '' , args.base_cmd )
args.base_cmd += f''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
    __a : List[Any] = re.sub(R'--overwrite_output_dir\s+' , '' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_0_0 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
__a : Dict = subprocess.run(lowerCamelCase_ , capture_output=lowerCamelCase_ , text=lowerCamelCase_ )
if verbose:
print('STDOUT' , result.stdout )
print('STDERR' , result.stderr )
# save the streams
__a : str = variation.replace(' ' , '-' )
with open(Path(lowerCamelCase_ ) / f'''log.{prefix}.stdout.txt''' , 'w' ) as f:
f.write(result.stdout )
with open(Path(lowerCamelCase_ ) / f'''log.{prefix}.stderr.txt''' , 'w' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('failed' )
return {target_metric_key: nan}
with io.open(f'''{output_dir}/all_results.json''' , 'r' , encoding='utf-8' ) as f:
__a : Union[str, Any] = json.load(lowerCamelCase_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Any , ):
__a : Optional[Any] = []
__a : List[str] = []
__a : str = f'''{id}: {variation:<{longest_variation_len}}'''
__a : str = f'''{preamble}: '''
__a : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(lowerCamelCase_ ) , desc=lowerCamelCase_ , leave=lowerCamelCase_ ):
__a : Optional[int] = process_run_single(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
__a : List[str] = single_run_metrics[target_metric_key]
if not math.isnan(lowerCamelCase_ ):
metrics.append(lowerCamelCase_ )
results.append(lowerCamelCase_ )
outcome += "✓"
else:
outcome += "✘"
__a : Dict = f'''\33[2K\r{outcome}'''
if len(lowerCamelCase_ ) > 0:
__a : Optional[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__a : str = round(mean_metrics[target_metric_key] , 2 )
__a : Optional[int] = f'''{outcome} {mean_target}'''
if len(lowerCamelCase_ ) > 1:
            results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
print(lowerCamelCase_ )
__a : Any = variation
return mean_metrics
else:
print(lowerCamelCase_ )
return {variation_key: variation, target_metric_key: nan}
def UpperCAmelCase__ ( ):
__a : List[str] = torch.cuda.get_device_properties(torch.device('cuda' ) )
return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**3_0:0.2f}GB
'''
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ):
__a : Any = pd.DataFrame(lowerCamelCase_ )
__a : Optional[int] = 'variation'
__a : Optional[Any] = 'diff_%'
__a : Union[str, Any] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__a : str = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(lowerCamelCase_ ):
# as a fallback, use the minimal value as the sentinel
__a : Optional[Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(lowerCamelCase_ ):
__a : str = df.apply(
lambda lowerCamelCase_ : round(1_0_0 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='columns' , )
# re-order columns
__a : int = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__a : List[str] = df.reindex(lowerCamelCase_ , axis='columns' ) # reorder cols
# capitalize
__a : Dict = df.rename(str.capitalize , axis='columns' )
# make the cols as narrow as possible
__a : Dict = df.rename(lambda lowerCamelCase_ : c.replace('_' , '<br>' ) , axis='columns' )
__a : Optional[Any] = df.rename(lambda lowerCamelCase_ : c.replace('_' , '\n' ) , axis='columns' )
__a : str = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )]
print('\n\n'.join(lowerCamelCase_ ) )
def UpperCAmelCase__ ( ):
__a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--base-cmd' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Base cmd' , )
parser.add_argument(
'--variations' , default=lowerCamelCase_ , type=lowerCamelCase_ , nargs='+' , required=lowerCamelCase_ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
parser.add_argument(
'--base-variation' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Baseline variation to compare to. If None, the minimal target value will be used to compare against' , )
parser.add_argument(
'--target-metric-key' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
parser.add_argument(
'--report-metric-keys' , default='' , type=lowerCamelCase_ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'' , )
parser.add_argument(
'--repeat-times' , default=1 , type=lowerCamelCase_ , help='How many times to re-run each variation - an average will be reported' , )
parser.add_argument(
'--output_dir' , default='output_benchmark' , type=lowerCamelCase_ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
parser.add_argument(
'--verbose' , default=lowerCamelCase_ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
__a : int = parser.parse_args()
__a : str = args.output_dir
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
__a : int = get_base_command(lowerCamelCase_ , lowerCamelCase_ )
# split each dimension into its --foo variations
__a : List[str] = [list(map(str.strip , re.split(R'\|' , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__a : Any = list(map(str.strip , map(' '.join , itertools.product(*lowerCamelCase_ ) ) ) )
__a : Any = max(len(x ) for x in variations )
# split wanted keys
__a : Dict = args.report_metric_keys.split()
# capture prints into a log file for convenience
__a : Any = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(f'''and this script\'s output is also piped into {report_fn}''' )
__a : List[str] = Tee(lowerCamelCase_ )
print(f'''\n*** Running {len(lowerCamelCase_ )} benchmarks:''' )
print(f'''Base command: {" ".join(lowerCamelCase_ )}''' )
__a : List[Any] = 'variation'
__a : Dict = []
for id, variation in enumerate(tqdm(lowerCamelCase_ , desc='Total completion: ' , leave=lowerCamelCase_ ) ):
__a : int = base_cmd + variation.split()
results.append(
process_run(
id + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.repeat_times , lowerCamelCase_ , args.verbose , ) )
process_results(lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.base_variation , lowerCamelCase_ )
if __name__ == "__main__":
main()
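# A hypothetical invocation sketch, assembled only from the arguments defined
# above (the script path is illustrative):
#
#   python benchmark.py \
#     --base-cmd 'python examples/pytorch/language-modeling/run_clm.py ...' \
#     --variations '|--fp16|--bf16' '|--tf32' \
#     --target-metric-key train_samples_per_second \
#     --report-metric-keys 'train_loss' \
#     --repeat-times 2 \
#     --output_dir output_benchmark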
| 47 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
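# All of the classes above are import-time placeholders: they let the library's
# top-level imports succeed and only fail when a class is actually used. A rough
# sketch of what `requires_backends` does (illustrative; `_is_available` is a
# hypothetical helper, not the real API):
#
#   def requires_backends(obj, backends):
#       missing = [b for b in backends if not _is_available(b)]
#       if missing:
#           raise ImportError(f"{obj} requires the missing backends: {missing}")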
| 47 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
SCREAMING_SNAKE_CASE__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
SCREAMING_SNAKE_CASE__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
SCREAMING_SNAKE_CASE__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase( datasets.Metric ):
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : Dict=False ):
'''simple docstring'''
__a : List[str] = compute_bleu(
reference_corpus=SCREAMING_SNAKE_CASE__ , translation_corpus=SCREAMING_SNAKE_CASE__ , max_order=SCREAMING_SNAKE_CASE__ , smooth=SCREAMING_SNAKE_CASE__ )
((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : List[Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
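# The `precisions` returned above are clipped n-gram precisions. A minimal,
# self-contained sketch of the unigram case for one prediction/reference pair
# (illustrative only, not this metric's actual implementation):
#
#   from collections import Counter
#
#   def clipped_unigram_precision(prediction, reference):
#       pred_counts = Counter(prediction)
#       ref_counts = Counter(reference)
#       clipped = sum(min(n, ref_counts[tok]) for tok, n in pred_counts.items())
#       return clipped / max(len(prediction), 1)
#
#   clipped_unigram_precision(["hello", "there"], ["hello", "there", "!"])  # 1.0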
| 47 |
import math
from datetime import datetime, timedelta
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
__a : Union[str, Any] = year % 1_9
__a : int = year % 4
__a : Optional[int] = year % 7
__a : Dict = math.floor(year / 1_0_0 )
__a : Optional[Any] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__a : Union[str, Any] = leap_day_inhibits / 4
__a : str = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__a : Union[str, Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__a : List[Any] = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__a : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase_ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase_ , 4 , 1_8 )
else:
return datetime(lowerCamelCase_ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
SCREAMING_SNAKE_CASE__ = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase__ ( lowerCamelCase_ : BertModel , lowerCamelCase_ : str , lowerCamelCase_ : str ):
__a : str = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
__a : int = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
__a : Union[str, Any] = model.state_dict()
def to_tf_var_name(lowerCamelCase_ : str ):
for patt, repl in iter(lowerCamelCase_ ):
__a : Union[str, Any] = name.replace(lowerCamelCase_ , lowerCamelCase_ )
return f'''bert/{name}'''
def create_tf_var(lowerCamelCase_ : np.ndarray , lowerCamelCase_ : str , lowerCamelCase_ : tf.Session ):
__a : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
__a : List[str] = tf.get_variable(dtype=lowerCamelCase_ , shape=tensor.shape , name=lowerCamelCase_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCamelCase_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__a : Optional[Any] = to_tf_var_name(lowerCamelCase_ )
__a : Optional[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__a : Union[str, Any] = torch_tensor.T
__a : str = create_tf_var(tensor=lowerCamelCase_ , name=lowerCamelCase_ , session=lowerCamelCase_ )
tf.keras.backend.set_value(lowerCamelCase_ , lowerCamelCase_ )
__a : Union[str, Any] = session.run(lowerCamelCase_ )
print(f'''Successfully created {tf_name}: {np.allclose(lowerCamelCase_ , lowerCamelCase_ )}''' )
__a : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any]=None ):
__a : str = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory in which to save tensorflow model' )
__a : List[Any] = parser.parse_args(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCamelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
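# A hypothetical invocation sketch, using only the arguments defined above
# (paths and script name are illustrative):
#
#   python convert_bert_pytorch_to_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./model/pytorch_model.bin \
#     --tf_cache_dir ./tf_model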
| 47 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = '''informer'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Dict = prediction_length
__a : Tuple = context_length or prediction_length
__a : Tuple = distribution_output
__a : Tuple = loss
__a : str = input_size
__a : Dict = num_time_features
__a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__a : str = scaling
__a : Tuple = num_dynamic_real_features
__a : int = num_static_real_features
__a : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__a : Optional[Any] = cardinality
else:
__a : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__a : int = embedding_dimension
else:
__a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__a : int = num_parallel_samples
# Transformer architecture configuration
__a : str = input_size * len(self.lags_sequence ) + self._number_of_features
__a : Optional[int] = d_model
__a : Union[str, Any] = encoder_attention_heads
__a : int = decoder_attention_heads
__a : Any = encoder_ffn_dim
__a : Union[str, Any] = decoder_ffn_dim
__a : List[Any] = encoder_layers
__a : Optional[int] = decoder_layers
__a : int = dropout
__a : Optional[Any] = attention_dropout
__a : Dict = activation_dropout
__a : Union[str, Any] = encoder_layerdrop
__a : Optional[int] = decoder_layerdrop
__a : List[str] = activation_function
__a : str = init_std
__a : Optional[int] = use_cache
# Informer
__a : Union[str, Any] = attention_type
__a : str = sampling_factor
__a : Dict = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
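# A minimal usage sketch with hypothetical values (in the transformers library
# this configuration class is exported as `InformerConfig`); every keyword
# argument below appears in the signature above:
#
#   config = InformerConfig(
#       prediction_length=24, context_length=48, num_time_features=2, d_model=64
#   )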
| 47 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] ):
__a : List[Any] = []
for line in lines:
__a : Optional[Any] = re.sub(R'#.*' , '' , lowerCamelCase_ ) # remove comments
if line:
filtered_lines.append(lowerCamelCase_ )
__a : Optional[Any] = '\n'.join(lowerCamelCase_ )
# Make a hash from all this code
__a : Optional[Any] = full_str.encode('utf-8' )
return shaaaa(lowerCamelCase_ ).hexdigest()
# get importable module names and hash for caching
SCREAMING_SNAKE_CASE__ = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
SCREAMING_SNAKE_CASE__ = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE__ = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE__ = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 47 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : List[Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Tuple = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : List[str] = 1_0, 0.0
__a : Dict = self.dummy_model()
__a : str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
__a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
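# For reference, the `_get_variance` values asserted above follow the DDIM
# variance of Song et al. (2020), roughly:
#   sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev)
# which `eta` then scales between deterministic sampling (eta=0) and
# DDPM-like stochastic sampling (eta=1).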
| 47 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCamelCase_ : bool , lowerCamelCase_ : bool ):
def run_func(lowerCamelCase_ : str ):
@wraps(lowerCamelCase_ )
def run_in_eager_mode(*lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
return func(*lowerCamelCase_ , **lowerCamelCase_ )
@wraps(lowerCamelCase_ )
@tf.function(experimental_compile=lowerCamelCase_ )
def run_in_graph_mode(*lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ):
return func(*lowerCamelCase_ , **lowerCamelCase_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__a : List[str] = random.Random()
__a : Optional[Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCamelCase_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : TensorFlowBenchmarkArguments
__SCREAMING_SNAKE_CASE : PretrainedConfig
__SCREAMING_SNAKE_CASE : str = "TensorFlow"
@property
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
return tf.__version__
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : int = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[int] = self._prepare_inference_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_speed(_inference )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[int] = self._prepare_train_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_speed(_train )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE__ )
__a : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[Any] = self._prepare_inference_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_memory(_inference )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : List[Any] = self._prepare_train_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_memory(_train )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : str = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
__a : Any = (
hasattr(SCREAMING_SNAKE_CASE__ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a : Optional[Any] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__a : List[Any] = __import__('transformers' , fromlist=[model_class] )
__a : Dict = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Any = model_cls(SCREAMING_SNAKE_CASE__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__a : Optional[int] = TF_MODEL_MAPPING[config.__class__](SCREAMING_SNAKE_CASE__ )
# encoder-decoder has vocab size saved differently
__a : List[str] = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE__ , 'vocab_size' ) else config.encoder.vocab_size
__a : Optional[Any] = random_input_ids(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
__a : Any = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
__a : Optional[int] = (
hasattr(SCREAMING_SNAKE_CASE__ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a : List[str] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__a : Dict = __import__('transformers' , fromlist=[model_class] )
__a : str = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = model_cls(SCREAMING_SNAKE_CASE__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__a : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](SCREAMING_SNAKE_CASE__ )
# encoder-decoder has vocab size saved differently
__a : int = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE__ , 'vocab_size' ) else config.encoder.vocab_size
__a : Tuple = random_input_ids(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__a : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )[0]
__a : List[str] = tf.gradients(SCREAMING_SNAKE_CASE__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__a : Tuple = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )[0]
__a : Tuple = tf.gradients(SCREAMING_SNAKE_CASE__ , model.trainable_variables )
return gradients
__a : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(SCREAMING_SNAKE_CASE__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__a : Optional[Any] = timeit.repeat(
SCREAMING_SNAKE_CASE__ , repeat=self.args.repeat , number=1_0 , )
return min(SCREAMING_SNAKE_CASE__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Callable[[], None] ):
'''simple docstring'''
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
__a : Any = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
__a : Any = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
__a : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__a : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = meminfo.used
__a : Dict = Memory(SCREAMING_SNAKE_CASE__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
__a : Any = None
else:
__a : Optional[Any] = measure_peak_memory_cpu(SCREAMING_SNAKE_CASE__ )
__a : Any = Memory(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__a : Any = stop_memory_tracing(SCREAMING_SNAKE_CASE__ )
if memory is None:
__a : Any = summary.total
else:
__a : List[str] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 47 |
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ):
# Check if the input is valid
if not len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
__a , __a , __a : Optional[Any] = equationa
__a , __a , __a : Optional[int] = equationa
# Calculate the determinants of the matrices
__a : str = aa * ba - aa * ba
__a : Tuple = ca * ba - ca * ba
__a : Union[str, Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (a consistent homogeneous system: x = y = 0)
return (0.0, 0.0)
else:
__a : Any = determinant_x / determinant
__a : Optional[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
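# Worked example: the system x + y = 3, x - y = 1 is passed as
# ([1, 1, 3], [1, -1, 1]) and solves to (2.0, 1.0):
#   determinant   = 1*(-1) - 1*1 = -2
#   determinant_x = 3*(-1) - 1*1 = -4  ->  x = -4 / -2 = 2.0
#   determinant_y = 1*1 - 1*3   = -2  ->  y = -2 / -2 = 1.0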
| 47 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = NllbTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Dict="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
'''simple docstring'''
__a : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
__a : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : Any = vocab_file
__a : List[str] = False if not self.vocab_file else True
__a : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__a : List[Any] = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a : Any = src_lang if src_lang is not None else 'eng_Latn'
__a : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
__a : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Dict = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__a : List[Any] = src_lang
__a : Tuple = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : int = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
__a : Optional[Any] = src_lang
__a : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
__a : List[Any] = []
__a : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__a : Dict = [self.cur_lang_code]
__a : Optional[int] = [self.eos_token_id]
__a : str = self.convert_ids_to_tokens(self.prefix_tokens )
__a : int = self.convert_ids_to_tokens(self.suffix_tokens )
__a : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Union[str, Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
__a : Optional[Any] = []
__a : int = [self.eos_token_id, self.cur_lang_code]
else:
__a : Optional[int] = [self.cur_lang_code]
__a : List[str] = [self.eos_token_id]
__a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
__a : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__a : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
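# A minimal usage sketch (in the transformers library this class is exported as
# `NllbTokenizerFast`; the checkpoint name comes from the pretrained map above):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")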
| 47 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47 | 1 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def UpperCAmelCase__ ( lowerCamelCase_ : int ):
__a : int = []
__a : int = 1_1
__a : List[Any] = int('1' + '0' * digit_len )
for num in range(lowerCamelCase_ , lowerCamelCase_ ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(lowerCamelCase_ , lowerCamelCase_ ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__a : Union[str, Any] = 1_0
return solutions
def UpperCAmelCase__ ( lowerCamelCase_ : int = 2 ):
__a : Optional[Any] = 1.0
for fraction in fraction_list(lowerCamelCase_ ):
__a : int = Fraction(lowerCamelCase_ )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase_ )
if __name__ == "__main__":
print(solution())
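# In the original (pre-renaming) source, fraction_list(2) yields the four
# non-trivial digit-cancelling fractions 16/64, 19/95, 26/65 and 49/98; their
# product is 1/100, so solution() evaluates to 100.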
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
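# A minimal sketch of the lazy-import idea behind `_LazyModule` (illustrative,
# not the actual implementation), using a PEP 562 module-level __getattr__:
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module("." + submodule, __name__)
#               return getattr(module, name)
#       raise AttributeError(name)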
| 47 | 1 |